示例#1
0
        def __init__(self, in_dim, out_dim, hidden_dim=100):
            """Build the model's embedding, pooling, and projection layers.

            Args:
                in_dim: dimensionality of the input features.
                out_dim: dimensionality of the output features.
                hidden_dim: size of the internal hidden representation.
            """
            super(Model, self).__init__()
            self.in_dim = in_dim
            self.out_dim = out_dim
            self.hidden_dim = hidden_dim

            # NOTE(review): `cls` and `self.num_words` are not defined in this
            # method — presumably supplied by the enclosing scope; confirm.
            self.emb = cls(self.num_words, self.hidden_dim)
            # Maps three concatenated hidden vectors back down to hidden_dim.
            self.pool = torchmodels.Linear(
                in_features=self.hidden_dim * 3,
                out_features=self.hidden_dim
            )
            self.in_linear = torchmodels.Linear(self.in_dim, self.hidden_dim)
            self.out_linear = torchmodels.Linear(self.hidden_dim, self.out_dim)
示例#2
0
    def __init__(self, *args, hidden_dim=300, num_layers=2,
                 nonlinear=nonlinear.AbstractNonlinear, **kwargs):
        """Construct the MLP: an input projection, a stack of nonlinear
        hidden layers, and an output projection.

        Args:
            hidden_dim: width of every hidden layer.
            num_layers: number of stacked nonlinear hidden layers.
            nonlinear: class used to instantiate each hidden layer.
        """
        super(MLP, self).__init__(*args, **kwargs)
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.nonlinear_cls = nonlinear

        self.input_layer = torchmodels.Linear(self.input_dim, self.hidden_dim)
        # num_layers identically-sized copies of the configured nonlinear
        # module, applied in sequence.
        hidden = [self.nonlinear_cls(self.hidden_dim, self.hidden_dim)
                  for _ in range(self.num_layers)]
        self.hidden_layers = torchmodels.Sequential(*hidden)
        self.output_layer = torchmodels.Linear(self.hidden_dim, self.output_dim)
示例#3
0
    def __init__(self, *args, hidden_dim=100, **kwargs):
        """Set up the input-to-ASV projection and the per-speaker ASV mask.

        Args:
            hidden_dim: size of the hidden representation.
        """
        super().__init__(*args, **kwargs)
        self.hidden_dim = hidden_dim

        # Single linear projection from the input space to the ASV space,
        # wrapped in a Sequential.
        self.input_layer = torchmodels.Sequential(
            torchmodels.Linear(
                in_features=self.input_dim,
                out_features=self.asv_dim,
            ),
        )
        # speaker mask is a [num_speakers x num asv] BoolTensor
        # that indicates active asvs for each speaker
        mask = self._create_speaker_asv_mask()
        self.speaker_asv_mask = nn.Parameter(mask, requires_grad=False)
示例#4
0
    def __init__(self, *args, num_layers=1, dropout=0.0, **kwargs):
        """Build the LSTM and two linear layers that expand an init vector
        of size init_dim to num_layers * hidden_dim (one vector per layer,
        named for the LSTM's h and c states).

        Args:
            num_layers: number of stacked LSTM layers.
            dropout: dropout rate passed through to nn.LSTM.
        """
        super(LSTMDecodingRNN, self).__init__(*args, **kwargs)
        self.num_layers = num_layers
        self.dropout = dropout

        # Both init layers share the same output width.
        state_size = self.num_layers * self.hidden_dim
        self.init_layer_c = torchmodels.Linear(
            in_features=self.init_dim,
            out_features=state_size
        )
        self.init_layer_h = torchmodels.Linear(
            in_features=self.init_dim,
            out_features=state_size
        )
        self.lstm = nn.LSTM(
            input_size=self.input_dim,
            hidden_size=self.hidden_dim,
            num_layers=self.num_layers,
            bidirectional=False,
            dropout=self.dropout
        )
示例#5
0
    def __init__(self, *args, rnn_dim=200,
                 decoding_rnn=AbstractDecodingRNN, **kwargs):
        """Create the decoding RNN and its output projection.

        Args:
            rnn_dim: hidden size of the decoding RNN.
            decoding_rnn: class used to instantiate the RNN.
        """
        super(RNNSentDecoder, self).__init__(*args, **kwargs)
        self.rnn_dim = rnn_dim
        self.rnn_cls = decoding_rnn

        self.rnn = self.rnn_cls(
            input_dim=self.word_dim,
            init_dim=self.hidden_dim,
            hidden_dim=self.rnn_dim,
        )
        # Projects rnn_dim-sized states back to word_dim.
        self.linear = torchmodels.Linear(
            in_features=self.rnn_dim,
            out_features=self.word_dim,
        )
示例#6
0
 def __init__(self, *args, **kwargs):
     """Initialize the linear layer and cache the callable from get_func()."""
     super(FunctionalNonlinear, self).__init__(*args, **kwargs)
     # NOTE(review): in_dim/out_dim presumably set by the parent __init__.
     self.linear = torchmodels.Linear(
         in_features=self.in_dim,
         out_features=self.out_dim,
     )
     self.func = self.get_func()