Example #1
0
 def __init__(self):
     """Two-layer 1-D CNN: 45 input features -> 16 filters -> 8 maps."""
     super(CnnModel, self).__init__()
     # First convolution: one input channel per feature (45 here), 16
     # learned filters, window of 9.  padding = (kernel_size - 1) // 2
     # keeps the sequence length unchanged.
     self.conv1 = Conv1d(45, 16, kernel_size=9, padding=4)
     # Second convolution maps the 16 feature maps down to 8 output
     # categories, again with "same" padding.
     self.conv2 = Conv1d(16, 8, kernel_size=3, padding=1)
Example #2
0
    def __init__(
            self,
            embedding_dim: int,
            num_filters: int,
            ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
            conv_layer_activation=torch.nn.functional.relu,
            output_dim: Optional[int] = None) -> None:
        """One Conv1d per n-gram width; optional projection to output_dim."""
        super(CnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        self._activation = conv_layer_activation
        self._output_dim = output_dim

        # Build each convolution and register it immediately so its
        # parameters are tracked by the module.
        self._convolution_layers = []
        for index, ngram_size in enumerate(self._ngram_filter_sizes):
            layer = Conv1d(in_channels=self._embedding_dim,
                           out_channels=self._num_filters,
                           kernel_size=ngram_size)
            self._convolution_layers.append(layer)
            self.add_module('conv_layer_%d' % index, layer)

        # Concatenated pooled maps: filters * number of widths.
        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(maxpool_output_dim,
                                           self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
Example #3
0
    def __init__(self, n_in_channels, n_out_channels, n_blocks,
                 n_init_features, growth_rate, drop_rate, kernel_internal,
                 glu_act):
        """Densely-connected 1-D conv net with an inception-style stem.

        Args:
            n_in_channels: channels of the input sequence.
            n_out_channels: size of the final per-position regression output.
            n_blocks: number of `_DenseConvBlock`s stacked after the stem.
            n_init_features: per-branch filter counts for the stem; entry i
                uses kernel size 1 + 2*i, and 0 disables that branch.
            growth_rate: channels added per dense block.
            drop_rate: dropout rate forwarded to the dense blocks.
            kernel_internal: kernel size used inside the dense blocks.
            glu_act: forwarded to the dense blocks unchanged.
        """
        super(DenseDeep1D_incept, self).__init__()

        self.n_blocks = n_blocks
        # Inception-style stem: parallel convs with growing odd kernels;
        # padding = conv_level keeps every branch the same length.
        self.init_features = torch.nn.ModuleList()
        for conv_level, vals in enumerate(n_init_features):
            if vals > 0:
                self.init_features.append(
                    Conv1d(n_in_channels,
                           vals,
                           kernel_size=1 + 2 * conv_level,
                           padding_mode='zeros',
                           padding=conv_level))
        total_init_features = sum(n_init_features)
        # NOTE(review): the LayerNorm hard-codes a sequence length of 107;
        # confirm inputs always have exactly that length.
        self.norm0 = LayerNorm([total_init_features, 107])
        self.act0 = ReLU(inplace=True)

        # Dense stack: block k receives the stem output plus
        # k * growth_rate previously produced feature maps.
        self.features = torch.nn.Sequential(OrderedDict())
        for k_block in range(n_blocks):
            self.features.add_module(
                'block_{}'.format(k_block),
                _DenseConvBlock(total_init_features + k_block * growth_rate,
                                growth_rate=growth_rate,
                                drop_rate=drop_rate,
                                kernel_size=kernel_internal,
                                glu_act=glu_act))

        self.act_final = ReLU(inplace=True)
        # Per-position linear head over all accumulated feature maps.
        self.regression = Linear(total_init_features + n_blocks * growth_rate,
                                 n_out_channels)
Example #4
0
    def __init__(self,
                 in_size,
                 out_size,
                 num_layers,
                 kernel_size,
                 dilation,
                 input_in_rnn_format=False,
                 batch_norm=False):
        """Stack of dilated Conv1d -> ReLU (-> BatchNorm1d) layers.

        The first layer maps in_size -> out_size channels; later layers
        keep out_size.  The conv bias is dropped when batch norm is used.
        """
        super().__init__()

        self.input_in_rnn_format = input_in_rnn_format
        self.sequential = Sequential()

        channels = in_size
        for layer_idx in range(num_layers):
            self.sequential.add_module(
                'cnn_c_%s' % layer_idx,
                Conv1d(in_channels=channels,
                       out_channels=out_size,
                       kernel_size=kernel_size,
                       dilation=dilation,
                       bias=not batch_norm))
            self.sequential.add_module('non_lin_%s' % layer_idx, ReLU())
            if batch_norm:
                self.sequential.add_module('bnn_c_%s' % layer_idx,
                                           BatchNorm1d(out_size))
            # All layers after the first operate on out_size channels.
            channels = out_size
 def __init__(self, num_channels):
     """Small 1-D CNN: padded stem, three shrinking convs, MLP head."""
     super().__init__()
     self.num_channels = num_channels
     # "Same"-padded stem preserves both channel count and length.
     self.conv1_input = Conv1d(num_channels,
                               num_channels,
                               kernel_size=3,
                               padding=1)
     self.relu = ReLU()
     # Unpadded convs shrink the length by 2 each; pooling halves it.
     self.conv1 = Conv1d(num_channels, 32, kernel_size=3)
     self.conv2 = Conv1d(32, 32, kernel_size=3)
     self.pool1 = MaxPool1d(kernel_size=2)
     self.conv3 = Conv1d(32, 16, kernel_size=3)
     self.flatten = Flatten()
     # NOTE(review): Linear(16, 100) implies the flattened map has exactly
     # 16 features, i.e. temporal length 1 after conv3 — confirm the
     # expected input length.
     self.linear1 = Linear(16, 100)
     self.linear2 = Linear(100, 50)
     self.linear3 = Linear(50, 1)
    def __init__(self,
                 input_dim: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...],
                 padding_sizes: Tuple[int, ...],
                 keep_prob: float = .8,
                 conv_layer_activation=F.relu,
                 output_dim: Optional[int] = None) -> None:
        """Parallel padded n-gram convolutions over embedded sequences.

        One Conv1d is created per (ngram_size, padding) pair; the layer
        count sizes the concatenated pooled output.
        """
        super(SelfAwareEmbedding, self).__init__()
        self.input_dim = input_dim
        self.num_filters = num_filters
        self.ngram_filter_sizes = ngram_filter_sizes
        self.padding_sizes = padding_sizes
        self.activation = conv_layer_activation
        self.output_dim = output_dim

        # Dropout probability is the complement of the keep probability.
        self.dropout = nn.Dropout(1 - keep_prob)

        # Build and register one convolution per filter width.
        self.convolution_layers = []
        for index, (ngram_size, padding) in enumerate(
                zip(self.ngram_filter_sizes, self.padding_sizes)):
            layer = Conv1d(in_channels=self.input_dim,
                           out_channels=self.num_filters,
                           kernel_size=ngram_size,
                           padding=padding)
            self.convolution_layers.append(layer)
            self.add_module('conv_layer_%d' % index, layer)

        # Concatenated pooled maps: filters * number of widths.
        maxpool_output_dim = self.num_filters * len(self.ngram_filter_sizes)
        if self.output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self.output_dim)
        else:
            self.projection_layer = None
            self.output_dim = maxpool_output_dim
Example #7
0
    def __init__(self,
                 in_channels=2,
                 num_layers=5,
                 filter_width=11,
                 channels=(16, 32, 64, 128, 256),
                 dropout=0.3,
                 rate=44100,
                 duration=0.5,
                 flat_dim=512):
        """Conv tower for fixed-length audio, ending in a flat projection.

        Args:
            in_channels: input channels (e.g. stereo audio).
            num_layers: must equal len(channels).
            filter_width: conv kernel size, "same"-padded.
            channels: output channels of each conv block.  The default is
                now an immutable tuple (was a shared mutable list — the
                classic mutable-default-argument pitfall); any sequence is
                accepted.
            dropout: dropout probability inside each block.
            rate, duration: sample rate and clip length; their product is
                rounded down to a power of two to get the working length.
            flat_dim: size of the final dense output.
        """
        assert num_layers == len(channels)
        super(ConvTwin, self).__init__()

        extra_px = filter_width // 2
        # Prepend the input channel count; list() accepts tuple or list.
        channels = [in_channels] + list(channels)

        # Largest power of two that fits in rate * duration samples.
        total_len = int(2**np.floor(np.log2(rate * duration)))

        def conv_block(in_ch, out_ch):
            # Conv ("same" padding) -> BN -> ReLU -> Dropout -> halve length.
            return nn.Sequential(
                Conv1d(in_ch, out_ch, kernel_size=filter_width,
                       padding=extra_px),
                BatchNorm1d(out_ch), ReLU(), Dropout(p=dropout),
                MaxPool1d(kernel_size=2))

        convs = [conv_block(channels[i - 1], channels[i])
                 for i in range(1, len(channels))]

        # Each block halves the sequence, hence the 2**len(convs) divisor.
        self.dense = Linear(total_len * channels[-1] // (2**len(convs)),
                            flat_dim)
        self.L = total_len

        # Single assignment (previously the plain list was assigned and
        # then immediately overwritten by the Sequential).
        self.convs = nn.Sequential(*convs)
Example #8
0
    def __init__(self,
                 embedding_dim,
                 num_filters,
                 ngram_filter_sizes=(2, 3, 4, 5),
                 conv_layer_activation=ReLU(),
                 output_dim=None):
        """N-gram CNN encoder with an optional linear output projection."""
        super(CNNEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        self._activation = conv_layer_activation
        self._output_dim = output_dim

        # Create and register one convolution per n-gram width so the
        # parameters are visible to the module.
        self._convolution_layers = []
        for index, width in enumerate(self._ngram_filter_sizes):
            convolution = Conv1d(in_channels=self._embedding_dim,
                                 out_channels=self._num_filters,
                                 kernel_size=width)
            self.add_module('conv_layer_%d' % index, convolution)
            self._convolution_layers.append(convolution)

        # Width of the concatenated per-filter outputs.
        combined_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(combined_dim, self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = combined_dim
  def __init__(self, batch_size, inputs, outputs):
    """Pointwise-conv regressor: conv stem -> flatten -> linear head.

    Args:
        batch_size: stored, and (unusually) used as the stem's output
            channel count — NOTE(review): confirm that is intentional.
        inputs: input channel count.
        outputs: size of the final linear layer's output.
    """
    # Initialise the parent module and store the configuration.
    super(cnnRegressor, self).__init__()
    self.batch_size = batch_size
    self.inputs = inputs
    # BUG FIX: was `self_outputs = outputs`, which bound a throwaway
    # local instead of setting the attribute.
    self.outputs = outputs

    # define input layers (input channels, output channels, kernel size)
    self.input_layer = Conv1d(inputs, batch_size, 1)
    # Kernel-size-1 pooling is a length-preserving no-op.
    self.max_pooling_layer = MaxPool1d(1)
    self.conv_layer = Conv1d(batch_size, 128, 1)
    self.flatten_layer = Flatten()
    # (inputs, outputs)
    self.linear_layer = Linear(128, 64)
    self.output_layer = Linear(64, outputs)
Example #10
0
def conv(c_in, c_out, ks, stride=1, bias=False, dilation=1, groups=1):
    """Build a "same"-padded Conv1d.

    Padding is (ks // 2) * dilation, so odd kernels preserve the input
    length at stride 1.

    Raises:
        ValueError: if both stride and dilation are greater than 1.
    """
    if stride > 1 and dilation > 1:
        raise ValueError("Dilation and stride can not both be greater than 1")
    pad = (ks // 2) * dilation
    return Conv1d(c_in, c_out, ks, stride=stride, padding=pad,
                  bias=bias, dilation=dilation, groups=groups)
Example #11
0
    def __init__(self,
                 in_channels,
                 out_channels,
                 slope=0.01,
                 kern_size=5,
                 max_pool=2,
                 eps=1e-05,
                 momentum=0.1,
                 use_skip=True):
        """Upsampling double-conv block: ConvTranspose1d then Conv1d.

        Args:
            in_channels: channels entering the block; doubled when
                `use_skip` because a skip tensor is expected to be
                concatenated before this block.
            out_channels: channels produced by both convolutions.
            slope: negative slope of both LeakyReLU activations.
            kern_size: kernel size shared by both convolutions.
            max_pool: stride of the transposed conv (undoes pooling by
                this factor).
            eps, momentum: NOTE(review): accepted but never passed to the
                BatchNorm1d layers below — confirm whether they should be.
            use_skip: whether the forward pass concatenates a skip tensor.
        """
        super(DoubleConvConcatAndDilate, self).__init__()
        padding = kern_size // 2
        # Transposed conv uses one less padding than the "same" value,
        # paired with stride=max_pool.
        transpose_padding = padding - 1

        # we need to account for the skip connection
        if use_skip:
            in_channels += in_channels

        self.conv1 = ConvTranspose1d(in_channels,
                                     out_channels,
                                     stride=max_pool,
                                     kernel_size=kern_size,
                                     padding=transpose_padding)

        self.batch_norm_1 = BatchNorm1d(out_channels)
        self.relu_1 = LeakyReLU(negative_slope=slope)

        # Second conv refines at the upsampled resolution, "same" padded.
        self.conv2 = Conv1d(out_channels,
                            out_channels,
                            kernel_size=kern_size,
                            padding=padding)

        self.batch_norm_2 = BatchNorm1d(out_channels)
        self.relu_2 = LeakyReLU(negative_slope=slope)
Example #12
0
    def __init__(self,
                 hidden_dim=512,
                 embedding_dim=256,
                 vocab_size=10000,
                 num_layers=10,
                 kernel_size=3,
                 dropout=0.25,
                 PAD_token=0,
                 max_len=50):
        """Convolutional decoder with token/position embeddings and
        attention."""
        super().__init__()
        self.kernel_size = kernel_size
        self.PAD_token = PAD_token
        self.num_layers = num_layers
        self.vocab_size = vocab_size
        self.max_len = max_len

        # Token identities and absolute positions embedded separately.
        self.token_embedding = Embedding(vocab_size, embedding_dim)
        self.position_embedding = Embedding(max_len, embedding_dim)

        # Linear adapters between embedding and hidden widths.
        self.embedd2hidden = Linear(embedding_dim, hidden_dim)
        self.hidden2embedd = Linear(hidden_dim, embedding_dim)

        self.attention_layer = Attention(embedding_dim, hidden_dim)
        self.decoder_conv = DecoderConv(kernel_size, dropout, PAD_token)
        # Each conv emits 2 * hidden_dim channels — presumably for a
        # gated (GLU-style) split; confirm in forward().
        conv_stack = []
        for _ in range(num_layers):
            conv_stack.append(Conv1d(in_channels=hidden_dim,
                                     out_channels=2 * hidden_dim,
                                     kernel_size=kernel_size))
        self.convs = ModuleList(conv_stack)

        self.out = Linear(embedding_dim, vocab_size)

        self.dropout = Dropout(dropout)
Example #13
0
 def __init__(self):
     """Vocoder-style generator layers: pre-conv (80 -> 512), four
     weight-normalised transposed convs with strides 8, 8, 2, 2, one
     ResBlock per stage, and a post-conv down to a single channel.

     NOTE(review): 80 input channels suggests mel-spectrogram input and a
     total upsampling of 8*8*2*2 = 256x — confirm against forward().
     """
     super(Generator, self).__init__()
     # 80 -> 512 channels, kernel 7, "same" padding.
     self.conv_pre = weight_norm(Conv1d(80, 512, 7, 1, padding=3))
     # Transposed convs halve the channels while upsampling by the stride.
     self.ups = nn.ModuleList([
         weight_norm(ConvTranspose1d(512, 256, 16, 8, padding=4)),
         weight_norm(ConvTranspose1d(256, 128, 16, 8, padding=4)),
         weight_norm(ConvTranspose1d(128, 64, 4, 2, padding=1)),
         weight_norm(ConvTranspose1d(64, 32, 4, 2, padding=1))
     ])
     # One ResBlock per upsampling stage, matching its channel count.
     self.resblocks = nn.ModuleList([
         ResBlock(256, 256),
         ResBlock(128, 128),
         ResBlock(64, 64),
         ResBlock(32, 32)
     ])
     self.conv_post = weight_norm(Conv1d(32, 1, 7, 1, padding=3))
Example #14
0
    def __init__(self,
                 input_channels,
                 output_channels,
                 kernel,
                 dropout=0.0,
                 activation='identity',
                 dilation=1,
                 groups=1,
                 batch_norm=True):
        """Conv1d block: explicit pad -> conv -> [BatchNorm] -> activation
        -> dropout, wrapped in a single Sequential.

        Args:
            input_channels: channels entering the block.
            output_channels: channels produced by the convolution.
            kernel: kernel size; even kernels are supported via
                asymmetric padding.
            dropout: dropout probability applied last.
            activation: name resolved by `get_activation`
                ('identity' default).
            dilation: convolution dilation.
            groups: convolution groups.
            batch_norm: insert BatchNorm1d and drop the redundant conv
                bias.
        """
        super(ConvBlock, self).__init__()

        self._groups = groups

        # "Same"-length padding done outside the conv: symmetric p for odd
        # kernels, asymmetric (p, p + 1) for even ones.
        p = (kernel - 1) * dilation // 2
        padding = p if kernel % 2 != 0 else (p, p + 1)
        layers = [
            ConstantPad1d(padding, 0.0),
            Conv1d(input_channels,
                   output_channels,
                   kernel,
                   padding=0,
                   dilation=dilation,
                   groups=groups,
                   bias=(not batch_norm))
        ]

        if batch_norm:
            layers += [BatchNorm1d(output_channels)]

        layers += [get_activation(activation)]
        layers += [Dropout(dropout)]

        self._block = Sequential(*layers)
Example #15
0
    def __init__(self, channels, kernel, reduction=2, use_hard_sigmoid=False):
        """
        Channel-wise attention module, Squeeze-and-Excitation Networks Jie Hu1, Li Shen, Gang Sun - https://arxiv.org/pdf/1709.01507v2.pdf
        :param channels: Number of input channels
        :param kernel: Kernel size of the "same"-padded Conv1d in `body`
        :param reduction: Reduction factor for the number of hidden units
        :param use_hard_sigmoid: Use "hard_sigmoid" instead of "sigmoid"
            for the final gate of `fc`
        """
        super(_ChannelAttentionModule, self).__init__()

        if use_hard_sigmoid:
            act_type = "hard_sigmoid"
        else:
            act_type = "sigmoid"
        # NOTE(review): a 2-D adaptive pool inside an otherwise 1-D module
        # — confirm the expected input rank.
        self.avg_pool = AdaptiveAvgPool2d(1)
        # NOTE(review): `body` always gates with plain "sigmoid"; only
        # `fc` honours act_type — confirm whether that is intended.
        self.body = Sequential(
            Conv1d(in_channels=channels,
                   out_channels=channels,
                   kernel_size=kernel,
                   padding=kernel // 2,
                   stride=1,
                   bias=True), get_act("sigmoid"))

        # Squeeze-and-excitation bottleneck:
        # channels -> channels // reduction -> channels, gated by act_type.
        self.fc = Sequential(
            Linear(channels, channels // reduction, bias=False),
            ReLU(inplace=True),
            Linear(channels // reduction, channels, bias=False),
            get_act(act_type))
Example #16
0
 def __init__(self,
              L=32,
              W=np.array([11] * 8 + [21] * 4 + [41] * 4),
              AR=np.array([1] * 4 + [4] * 4 + [10] * 4 + [25] * 4)):
     """Dilated residual 1-D conv net over 4-channel input.

     Args:
         L: channel width used throughout the residual trunk.
         W: per-unit kernel widths.
         AR: per-unit dilations (atrous rates), paired with W.

     NOTE(review): the np.array defaults are shared across instances;
     they are not mutated here, but treat them as read-only.
     """
     super().__init__()
     # Total context consumed: each unit adds AR * (W - 1) on both sides.
     self.CL = 2 * (AR * (W - 1)).sum()  # context length
     # Pointwise conv lifts the 4 input channels to L.
     self.conv1 = Conv1d(4, L, 1)
     self.skip1 = Skip(L)
     self.residual_blocks = ModuleList()
     for i, (w, r) in enumerate(zip(W, AR)):
         self.residual_blocks.append(ResidualUnit(L, w, r))
         # Insert a Skip aggregation point after every fourth unit.
         if (i + 1) % 4 == 0:
             self.residual_blocks.append(Skip(L))
     # NOTE(review): this appends a trailing Skip unless len(W) ≡ 3
     # (mod 4); if the intent was "unless the loop just added one", the
     # condition would be `len(W) % 4 != 0` — confirm.
     if (len(W) + 1) % 4 != 0:
         self.residual_blocks.append(Skip(L))
     # Final pointwise conv to 3 output channels.
     self.last = Conv1d(L, 3, 1)
Example #17
0
 def __init__(self, args):
     """Attention-weighted instance classifier ("Conan").

     Reads from `args`: feature_depth, dropout, constant_size,
     batch_size, and (optionally, via `is_in_args` defaults) hidden1 and
     hidden_fcn.
     """
     # k sizes the classifier input ((hidden1 + 1) * 2 * k) — presumably
     # top-/bottom-k instance selection; confirm in forward().
     self.k = 10
     self.hidden1 = is_in_args(args, 'hidden1', 32)
     self.hidden2 = self.hidden1 // 4
     self.hidden_fcn = is_in_args(args, 'hidden_fcn', 32)
     # NOTE(review): bitwise & instead of `and`; correct when
     # constant_size is a bool/0/1 — confirm its type.
     use_bn = args.constant_size & (args.batch_size > 8)
     # NOTE(review): plain attributes are assigned before
     # super().__init__(); harmless for ints, but any Module/Parameter
     # must be assigned after it.
     super(Conan, self).__init__()
     # Bottlenecked conv trunk: feature_depth -> hidden1 -> hidden2
     # -> hidden1.
     self.continuous_clusters = Sequential(
         Conv1d_bn(in_channels=args.feature_depth,
                   out_channels=self.hidden1,
                   dropout=args.dropout,
                   use_bn=use_bn),
         Conv1d_bn(in_channels=self.hidden1,
                   out_channels=self.hidden2,
                   dropout=args.dropout,
                   use_bn=use_bn),
         Conv1d_bn(in_channels=self.hidden2,
                   out_channels=self.hidden1,
                   dropout=args.dropout,
                   use_bn=use_bn),
     )
     # Per-instance scalar score via a pointwise conv.
     self.weights = Sequential(
         Conv1d(in_channels=self.hidden1, out_channels=1, kernel_size=1),
         ReLU())
     # Sigmoid-terminated binary classifier head.
     self.classifier = Sequential(
         Dense_bn(in_channels=(self.hidden1 + 1) * 2 * self.k +
                  self.hidden1,
                  out_channels=self.hidden_fcn,
                  dropout=args.dropout,
                  use_bn=use_bn),
         Dense_bn(in_channels=self.hidden_fcn,
                  out_channels=self.hidden_fcn,
                  dropout=args.dropout,
                  use_bn=use_bn),
         Linear(in_features=self.hidden_fcn, out_features=1), Sigmoid())
Example #18
0
    def __init__(self,
                 embedding_dim: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
                 ) -> None:
        """Multi-width "same"-padded CNN seq2seq encoder.

        Args:
            embedding_dim: input channels of every convolution.
            num_filters: output channels of every convolution.
            ngram_filter_sizes: kernel widths; each must be odd so that
                padding = width // 2 preserves the sequence length.

        Raises:
            ValueError: if any n-gram size is even.
        """
        super(CnnSeq2SeqEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes

        # Validate that every kernel width is odd.  (Previously an
        # `assert`, which is silently stripped under `python -O`.)
        for ngram_size in self._ngram_filter_sizes:
            if ngram_size % 2 != 1:
                raise ValueError(
                    'ngram_filter_sizes must all be odd, got %d' % ngram_size)

        self._convolution_layers = [Conv1d(in_channels=self._embedding_dim,
                                           out_channels=self._num_filters,
                                           padding=ngram_size // 2,
                                           kernel_size=ngram_size)
                                    for ngram_size in self._ngram_filter_sizes]

        # Register each conv so its parameters are tracked.
        for i, conv_layer in enumerate(self._convolution_layers):
            self.add_module('conv_layer_%d' % i, conv_layer)

        # Output width: filters per width * number of widths.
        self._output_dim = self._num_filters * len(self._ngram_filter_sizes)
Example #19
0
    def __init__(self,
                 input_dim: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int, ...],
                 dropout: float = .0,
                 conv_layer_activation = F.relu,
                 output_dim: Optional[int] = None) -> None:
        """N-gram CNN encoder with input dropout and optional projection."""
        super(CNNEncoder, self).__init__()
        self.input_dim = input_dim
        self.num_filters = num_filters
        self.ngram_filter_sizes = ngram_filter_sizes
        self.activation = conv_layer_activation
        self.output_dim = output_dim

        self.dropout = nn.Dropout(dropout)
        # One convolution per n-gram width, registered as a submodule so
        # its parameters are tracked.
        self.convolution_layers = []
        for i, ngram_size in enumerate(self.ngram_filter_sizes):
            conv = Conv1d(in_channels=self.input_dim,
                          out_channels=self.num_filters,
                          kernel_size=ngram_size)
            self.convolution_layers.append(conv)
            self.add_module('conv_layer_%d' % i, conv)

        # Concatenated pooled maps: filters * number of widths.
        maxpool_output_dim = self.num_filters * len(self.ngram_filter_sizes)
        if self.output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self.output_dim)
        else:
            self.projection_layer = None
            self.output_dim = maxpool_output_dim
Example #20
0
    def __init__(
            self,
            embedding_dim: int,
            num_filters: int,
            ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
            conv_layer_activation: Activation = None,
            output_dim: Optional[int] = None) -> None:
        """CNN encoder that also registers an explicit max-pool per width."""
        super(ExplainableCnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        # Default to ReLU when no activation is supplied.
        self._activation = conv_layer_activation or Activation.by_name(
            'relu')()
        self._output_dim = output_dim

        # A (conv, maxpool) pair per n-gram width; both halves are
        # registered so their parameters/buffers are tracked.
        self._convolution_layers = []
        for index, ngram_size in enumerate(self._ngram_filter_sizes):
            pair = (Conv1d(in_channels=self._embedding_dim,
                           out_channels=self._num_filters,
                           kernel_size=ngram_size),
                    MaxPool1dAll(kernel_size=None))
            self._convolution_layers.append(pair)
            self.add_module('conv_layer_%d' % index, pair[0])
            self.add_module('maxpool_layer_%d' % index, pair[1])

        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(maxpool_output_dim,
                                           self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
Example #21
0
    def __init__(
        self,
        embedding_dim: int,
        num_filters: int,
        ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
        conv_layer_activation: Activation = None,
        output_dim: Optional[int] = None,
    ) -> None:
        """Parallel n-gram convolutions with an optional projection head."""
        super().__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        # Fall back to ReLU when the caller provides no activation.
        self._activation = conv_layer_activation or Activation.by_name(
            "relu")()

        # Build each convolution and register it as it is created.
        self._convolution_layers = []
        for index, ngram_size in enumerate(self._ngram_filter_sizes):
            convolution = Conv1d(
                in_channels=self._embedding_dim,
                out_channels=self._num_filters,
                kernel_size=ngram_size,
            )
            self._convolution_layers.append(convolution)
            self.add_module("conv_layer_%d" % index, convolution)

        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if output_dim:
            # Project the concatenated pooled features to output_dim.
            self.projection_layer = Linear(maxpool_output_dim, output_dim)
            self._output_dim = output_dim
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
    def __init__(
            self,
            embedding_dim: int,
            num_filters: int,
            ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),  # pylint: disable=bad-whitespace
            conv_layer_activation: Activation = None,
            output_dim: Optional[int] = None) -> None:
        """CNN encoder: one Conv1d per n-gram width, no projection layer.

        NOTE(review): unlike the sibling encoders in this file, this
        variant builds no projection layer, and `self._output_dim` is
        unconditionally overwritten on the last line — the `output_dim`
        argument therefore has no effect.  Confirm whether a projection
        branch was dropped accidentally.
        """
        super(CnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        # Default to ReLU when no activation is supplied.
        self._activation = conv_layer_activation or Activation.by_name(
            'relu')()
        self._output_dim = output_dim

        self._convolution_layers = [
            Conv1d(in_channels=self._embedding_dim,
                   out_channels=self._num_filters,
                   kernel_size=ngram_size)
            for ngram_size in self._ngram_filter_sizes
        ]
        # Register each conv so its parameters are tracked.
        for i, conv_layer in enumerate(self._convolution_layers):
            self.add_module('conv_layer_%d' % i, conv_layer)

        # Overwrites the value stored above (see NOTE in the docstring).
        self._output_dim = self._num_filters * len(self._ngram_filter_sizes)
Example #23
0
    def __init__(self,
                 embedding_dim,
                 num_filters,
                 ngram_filter_sizes=(2, 3, 4, 5),
                 conv_layer_activation=None,
                 output_dim=None):
        """N-gram CNN encoder (Python-2-compatible unicode literals)."""
        super(CnnEncoder, self).__init__()
        self._embedding_dim = embedding_dim
        self._num_filters = num_filters
        self._ngram_filter_sizes = ngram_filter_sizes
        # ReLU unless the caller supplies an activation.
        self._activation = conv_layer_activation or Activation.by_name(u'relu')()
        self._output_dim = output_dim

        # Create and register one convolution per n-gram width.
        self._convolution_layers = []
        for i, ngram_size in enumerate(self._ngram_filter_sizes):
            conv = Conv1d(in_channels=self._embedding_dim,
                          out_channels=self._num_filters,
                          kernel_size=ngram_size)
            self._convolution_layers.append(conv)
            self.add_module(u'conv_layer_%d' % i, conv)

        # Concatenated pooled maps: filters * number of widths.
        maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
        if self._output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self._output_dim)
        else:
            self.projection_layer = None
            self._output_dim = maxpool_output_dim
Example #24
0
    def __init__(
        self,
        embedding_dim,
        num_filters=100,
        ngram_filter_sizes=(2, 3, 4, 5),
        conv_layer_activation="relu",
        output_dim=None,
    ):
        """N-gram CNN encoder with optional output projection.

        NOTE(review): `conv_layer_activation` is accepted but ignored —
        the attribute below is hard-wired to torch.nn.functional.relu;
        confirm whether other activations were meant to work.
        NOTE(review): calls nn.Module.__init__ directly instead of
        super().__init__(); equivalent only for single inheritance.
        """
        nn.Module.__init__(self)

        self.embedding_dim = embedding_dim
        self.num_filters = num_filters
        self.ngram_filter_sizes = ngram_filter_sizes
        # Hard-wired ReLU (see NOTE in the docstring).
        self.conv_layer_activation = torch.nn.functional.relu
        self.output_dim = output_dim

        # One conv per n-gram width; ModuleList handles registration.
        self.convolution_layers = nn.ModuleList()
        for ngram_size in self.ngram_filter_sizes:
            self.convolution_layers.append(
                Conv1d(
                    in_channels=self.embedding_dim,
                    out_channels=self.num_filters,
                    kernel_size=ngram_size,
                ))

        # Concatenated pooled maps: filters * number of widths.
        maxpool_output_dim = self.num_filters * len(self.ngram_filter_sizes)
        if self.output_dim:
            self.projection_layer = Linear(maxpool_output_dim, self.output_dim)
        else:
            self.projection_layer = None
            self.output_dim = maxpool_output_dim
Example #25
0
    def __init__(self, feature_extractor_params: ClonableModule, k, arch):
        """
        In the constructor we instantiate the snail module

        `arch` is a list of (layer_type, layer_infos) pairs:
        ('att', (key_size, value_size)) builds an AttentionBlock and grows
        the channel count by value_size; ('tc', kernel_size) builds a
        TCBlock over k + 1 steps and adopts its output_dim.  The network
        ends with a 1x1 Conv1d down to one channel, wrapped in Transpose
        pairs so the conv sees channels-first data.
        """
        super(SNAILNetwork, self).__init__()
        self.feature_extractor = FeaturesExtractorFactory()(
            **feature_extractor_params)
        self.arch = arch
        # +1 channel on top of the extractor output — presumably a
        # label/target concatenated in forward(); confirm.
        input_dim = self.feature_extractor.output_dim + 1

        layers = []
        for i, (layer_type, layer_infos) in enumerate(arch):
            if layer_type == 'att':
                key_size, value_size = layer_infos
                layer = AttentionBlock(input_dim, key_size, value_size)
                # Channel count grows by the attention value size.
                input_dim = input_dim + value_size
            elif layer_type == 'tc':
                kernel_size = layer_infos
                layer = TCBlock(input_dim, k + 1, kernel_size)
                input_dim = layer.output_dim
            else:
                raise Exception(
                    "Impossible to create that type of layer in this model")
            layers.append(layer)
        # Transpose to channels-first, squash to 1 channel, transpose back.
        layers.append(Transpose(1, 2))
        layers.append(Conv1d(input_dim, 1, 1))
        layers.append(Transpose(1, 2))
        self.net = Sequential(*layers)
    def __init__(self, params):
        """Vertical (row-wise) attention module configured from a dict.

        Reads from `params`: att_fc_size, features_size, use_location,
        use_coverage_vector, coverage_mode, use_hidden, min_height_feat,
        min_width_feat, stop_mode, att_dropout, and hidden_size (required
        when use_hidden is true).
        """
        super(VerticalAttention, self).__init__()
        self.att_fc_size = params["att_fc_size"]
        self.features_size = params["features_size"]
        self.use_location = params["use_location"]
        self.use_coverage_vector = params["use_coverage_vector"]
        self.coverage_mode = params["coverage_mode"]
        self.use_hidden = params["use_hidden"]
        self.min_height = params["min_height_feat"]
        self.min_width = params["min_width_feat"]
        self.stop_mode = params["stop_mode"]

        # Collapse the width axis to min_width, then to a single column.
        self.ada_pool = AdaptiveMaxPool2d((None, self.min_width))
        self.dense_width = Linear(self.min_width, 1)

        # Project features into the attention space, then score each row.
        self.dense_enc = Linear(self.features_size, self.att_fc_size)
        self.dense_align = Linear(self.att_fc_size, 1)

        if self.stop_mode == "learned":
            # Learned stop decision: pool to a fixed height, convolve,
            # collapse the height axis, and emit 2 logits (optionally
            # conditioned on the recurrent hidden state).
            self.ada_pool_height = AdaptiveMaxPool1d(self.min_height)
            self.conv_decision = Conv1d(self.att_fc_size,
                                        self.att_fc_size,
                                        kernel_size=5,
                                        padding=2)
            self.dense_height = Linear(self.min_height, 1)
            if self.use_hidden:
                self.dense_decision = Linear(
                    params["hidden_size"] + self.att_fc_size, 2)
            else:
                self.dense_decision = Linear(self.att_fc_size, 2)
        # One extra input channel each for location and coverage signals.
        in_ = 0
        if self.use_location:
            in_ += 1
        if self.use_coverage_vector:
            in_ += 1

        # NOTE(review): if both flags are false, in_ == 0 and these layers
        # are built with zero input channels — confirm at least one flag
        # is always set.
        self.norm = InstanceNorm1d(in_, track_running_stats=False)
        self.conv_block = Conv1d(in_, 16, kernel_size=15, padding=7)
        self.dense_conv_block = Linear(16, self.att_fc_size)

        if self.use_hidden:
            self.hidden_size = params["hidden_size"]
            self.dense_hidden = Linear(self.hidden_size, self.att_fc_size)

        self.dropout = Dropout(params["att_dropout"])

        # Cached features; populated outside __init__.
        self.h_features = None
Example #27
0
    def __init__(
        self,
        resblock,
        upsample_rates,
        upsample_kernel_sizes,
        upsample_initial_channel,
        resblock_kernel_sizes,
        resblock_dilation_sizes,
        initial_input_size=80,
        apply_weight_init_conv_pre=False,
    ):
        """Waveform-generator layers: pre-conv, transposed-conv upsampling
        stack, a bank of ResBlocks per stage, and a post-conv to one
        channel.

        Args:
            resblock: 1 selects ResBlock1, anything else ResBlock2.
            upsample_rates: stride of each upsampling stage.
            upsample_kernel_sizes: kernel of each upsampling stage.
            upsample_initial_channel: channels after the pre-conv; halved
                at every stage.
            resblock_kernel_sizes, resblock_dilation_sizes: one ResBlock
                per (kernel, dilation-set) pair at every stage.
            initial_input_size: input channels (80 — presumably mel bins;
                confirm).
            apply_weight_init_conv_pre: also apply init_weights to the
                pre-conv.
        """
        super().__init__()
        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.conv_pre = weight_norm(
            Conv1d(initial_input_size,
                   upsample_initial_channel,
                   7,
                   1,
                   padding=3))
        # Select the ResBlock flavour (deliberately rebinds the param).
        resblock = ResBlock1 if resblock == 1 else ResBlock2

        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            # Channels halve at each stage; padding = (k - u) // 2 pairs
            # each kernel with its stride.
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        upsample_initial_channel // (2**i),
                        upsample_initial_channel // (2**(i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )))

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = upsample_initial_channel // (2**(i + 1))
            for j, (k, d) in enumerate(
                    zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(resblock(ch, k, d))

        # NOTE(review): `ch` leaks from the loop above — empty
        # upsample_rates would raise NameError here.
        self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        if apply_weight_init_conv_pre:
            self.conv_pre.apply(init_weights)
Example #28
0
 def __init__(self, up_ratio=4):
     """Up-projection unit: pointwise conv stem, then up/up/down blocks.

     NOTE(review): the `up_ratio` argument is ignored — the inner blocks
     hard-code up_ratio=4; confirm before passing other values.
     """
     super(up_projection_unit, self).__init__()
     # 648 -> 128 channel reduction with a 1x1 conv + ReLU.
     self.conv1 = nn.Sequential(
         Conv1d(in_channels=648, out_channels=128, kernel_size=1),
         nn.ReLU())
     # +2 input channels — presumably appended 2-D coordinates; confirm
     # against the up_block implementation.
     self.up_block1 = up_block(up_ratio=4, in_channels=128 + 2)
     self.up_block2 = up_block(up_ratio=4, in_channels=128 + 2)
     self.down_block = down_block(up_ratio=4, in_channels=128)
Example #29
0
    def __init__(self, in_channels, out_channels, stride, kernel_size,
                 nb_filters, bottleneck_size, use_bottleneck):
        """Inception-style 1-D convolution block.

        NOTE(review): `out_channels` is never used below, and the
        bottleneck is built regardless of `use_bottleneck` — confirm.
        NOTE(review): despite the "cov_parallel" naming, each conv takes
        the previous conv's output channels (`temp_input` chains), so the
        convs run in sequence rather than in parallel off the bottleneck
        — confirm against forward().
        """
        super().__init__()

        self.bottleneck_size = bottleneck_size
        self.stride = stride
        self.kernel_size = kernel_size
        self.nb_filters = nb_filters
        self.use_bottleneck = use_bottleneck

        # Pass-through marker for the input/residual path.
        self.input = Identity()

        # 1x1 conv reducing the input to the bottleneck width.
        self.bottleneckInput = Conv1d(in_channels,
                                      self.bottleneck_size,
                                      kernel_size=1,
                                      padding=0,
                                      bias=False)

        # Halving kernel sizes, e.g. kernel_size=40 -> [40, 20, 10].
        # kernel_size_s = [3, 5, 8, 11, 17]
        kernel_size_s = [self.kernel_size // (2**i) for i in range(3)]

        # Convs are registered via setattr (so their parameters are
        # tracked) and also collected in a plain list for iteration.
        self.conv_list = []
        self.temp_input = self.bottleneck_size
        for i in range(len(kernel_size_s)):
            setattr(
                self, "cov_parallel_%d" % kernel_size_s[i],
                Conv1d(self.temp_input,
                       self.nb_filters,
                       kernel_size=kernel_size_s[i],
                       stride=self.stride,
                       padding=0,
                       bias=False))
            self.temp_input = self.nb_filters
            self.conv_list.append(
                getattr(self, "cov_parallel_%d" % kernel_size_s[i]))
        self.max_pool = MaxPool1d(kernel_size=3, stride=self.stride, padding=0)

        # Pooling branch: 1x1 conv from the bottleneck width.
        self.conv = Conv1d(self.bottleneck_size,
                           self.nb_filters,
                           kernel_size=1,
                           padding=0,
                           bias=False)

        self.bn = BatchNorm1d(self.nb_filters)
Example #30
0
    def __init__(
        self, in_channels, out_channels, dim, kernel_size, hidden_channels=None, dilation=1, bias=True, **kwargs,
    ):
        """X-Conv operator: lift neighborhood geometry to features, learn a
        K x K transform, and apply a depthwise-separable convolution.

        Args:
            in_channels: per-point input feature channels (C_in).
            out_channels: output channels (C_out).
            dim: point dimensionality (D).
            kernel_size: neighborhood size (K).
            hidden_channels: lifted-feature width (C_delta); defaults to
                in_channels // 4 and must be positive.
            dilation: stored only; not used in this constructor.
            bias: bias flag for the final linear projection.
            **kwargs: stored for use elsewhere in the class.
        """
        super(XConv, self).__init__()

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        # NOTE(review): assert disappears under `python -O`; a ValueError
        # would be sturdier validation.
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        # mlp1: per-point MLP lifting D coordinates to C_delta features,
        # reshaped to (-1, K, C_delta) per neighborhood.
        self.mlp1 = S(
            L(dim, C_delta), ELU(), BN(C_delta), L(C_delta, C_delta), ELU(), BN(C_delta), Reshape(-1, K, C_delta),
        )

        # mlp2: maps the flattened D*K neighborhood geometry to a K x K
        # matrix through two grouped (groups=K) Conv1d stages.
        self.mlp2 = S(
            L(D * K, K ** 2),
            ELU(),
            BN(K ** 2),
            Reshape(-1, K, K),
            Conv1d(K, K ** 2, K, groups=K),
            ELU(),
            BN(K ** 2),
            Reshape(-1, K, K),
            Conv1d(K, K ** 2, K, groups=K),
            BN(K ** 2),
            Reshape(-1, K, K),
        )

        # Depthwise conv (groups=C_in) with an integer depth multiplier,
        # followed by a linear projection down to C_out.
        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()