Example #1
    def __init__(self, vocab, annotation_dropout, prediction_dropout):
        super().__init__(vocab)

        embedding_size = vocab.vectors.shape[1]

        self.rnn = nn.GRU(input_size=embedding_size,
                          hidden_size=embedding_size,
                          bidirectional=True,
                          batch_first=True)

        # in-place nn.init variants (the non-underscore versions are deprecated):
        # Xavier for input-hidden, orthogonal for hidden-hidden, zero biases
        for name, param in self.rnn.named_parameters():
            if name.startswith('weight_ih_'):
                nn.init.xavier_uniform_(param)
            elif name.startswith('weight_hh_'):
                nn.init.orthogonal_(param)
            elif name.startswith('bias_'):
                nn.init.constant_(param, 0.0)

        self.annotation = base.Dense(3 * embedding_size,
                                     embedding_size,
                                     hidden_layers=1,
                                     hidden_nonlinearity='relu',
                                     output_nonlinearity='tanh',
                                     dropout=annotation_dropout)

        # one learned vector per target label, initialized uniformly in [-1, 1]
        self.label_vectors = nn.Parameter(
            torch.zeros(len(common.LABELS), embedding_size))
        nn.init.uniform_(self.label_vectors, -1, 1)

        self.prediction = base.Dense(embedding_size,
                                     1,
                                     output_nonlinearity='sigmoid',
                                     dropout=prediction_dropout)
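
For context, the 3 * embedding_size input to `annotation` matches the embeddings (E) concatenated with the bidirectional GRU output (2E), and `label_vectors` plus the E-to-1 `prediction` head suggest per-label attention. The forward pass below is a hypothetical sketch of that wiring, not code from the repo; `text_emb` and the shapes in the comments are assumptions.

    # Hypothetical forward wiring (assumed, not from the source repo).
    # B = batch, T = sequence length, E = embedding_size, L = number of labels.
    def forward(self, text_emb):                    # text_emb: (B, T, E), assumed input
        rnn_out, _ = self.rnn(text_emb)             # (B, T, 2E) bidirectional outputs
        annot = self.annotation(
            torch.cat([text_emb, rnn_out], dim=-1))           # (B, T, E)
        attn = torch.softmax(annot @ self.label_vectors.t(), dim=1)  # (B, T, L)
        context = attn.transpose(1, 2) @ annot                # (B, L, E)
        return self.prediction(context).squeeze(-1)           # (B, L) per-label probabilities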
Example #2
    def __init__(self, vocab, rnn_size, rnn_layers, rnn_dropout, dense_layers,
                 dense_nonlinearity, dense_dropout):
        super().__init__(vocab)

        # learned initial hidden state, shape (num_directions * rnn_layers, 1,
        # rnn_size); the batch dimension of 1 is expanded at run time
        h0 = torch.zeros(2 * rnn_layers, 1, rnn_size)
        self.rnn_h0 = nn.Parameter(h0, requires_grad=True)

        self.rnn = nn.LSTM(input_size=vocab.vectors.shape[1],
                           hidden_size=rnn_size,
                           num_layers=rnn_layers,
                           bidirectional=True,
                           batch_first=True)

        for name, param in self.rnn.named_parameters():
            if name.startswith('weight_ih_'):
                nn.init.xavier_uniform_(param)
            elif name.startswith('weight_hh_'):
                nn.init.orthogonal_(param)
            elif name.startswith('bias_'):
                nn.init.constant_(param, 0.0)

        if rnn_dropout:
            # DropConnect on the recurrent weights (see the sketch after this example)
            weights = ['weight_hh_l{}'.format(k) for k in range(rnn_layers)]
            self.rnn = base.WeightDrop(self.rnn, weights, dropout=rnn_dropout)

        self.dense = base.Dense(2 * rnn_size,
                                len(common.LABELS),
                                output_nonlinearity='sigmoid',
                                hidden_layers=dense_layers,
                                hidden_nonlinearity=dense_nonlinearity,
                                dropout=dense_dropout)
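
`base.WeightDrop` is not shown here; the sketch below is a minimal, assumed implementation of the underlying technique, DropConnect on the hidden-to-hidden matrices as in Merity et al.'s AWD-LSTM. The repo's actual helper may differ.

    import torch.nn as nn
    import torch.nn.functional as F

    class WeightDrop(nn.Module):
        """Assumed sketch: re-drops the named raw weights before every forward pass."""

        def __init__(self, module, weight_names, dropout):
            super().__init__()
            self.module, self.weight_names, self.dropout = module, weight_names, dropout
            for name in weight_names:
                raw = getattr(module, name)
                del module._parameters[name]        # the dropped copy replaces it
                module.register_parameter(name + '_raw', nn.Parameter(raw.data))

        def forward(self, *args):
            for name in self.weight_names:
                raw = getattr(self.module, name + '_raw')
                setattr(self.module, name,
                        F.dropout(raw, p=self.dropout, training=self.training))
            # note: cuDNN may warn about non-contiguous RNN weights; production
            # implementations usually also patch flatten_parameters()
            return self.module(*args)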
Example #3
    def __init__(self, vocab, conv_blocks, conv_dropout, dense_layers,
                 dense_nonlinearity, dense_dropout):
        super().__init__(vocab)

        self.conv_blocks = conv_blocks
        channels = vocab.vectors.shape[1]
        for k in range(1, conv_blocks + 1):
            setattr(self, f'conv_block{k}', ConvBlock(channels, conv_dropout))

        self.dense = base.Dense(
            channels, len(common.LABELS),
            output_nonlinearity='sigmoid',
            hidden_layers=dense_layers,
            hidden_nonlinearity=dense_nonlinearity,
            input_dropout=dense_dropout,
            hidden_dropout=dense_dropout)
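
`ConvBlock` is defined elsewhere in the file; a plausible minimal version is sketched below, under the assumption that it is a channel-preserving residual 1-D convolution block. The kernel size and layer layout are guesses.

    import torch.nn as nn

    class ConvBlock(nn.Module):
        """Assumed: residual block of two 1-D convolutions keeping (B, C, T) shape."""

        def __init__(self, channels, dropout):
            super().__init__()
            self.net = nn.Sequential(
                nn.Conv1d(channels, channels, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Dropout(dropout),
                nn.Conv1d(channels, channels, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Dropout(dropout))

        def forward(self, x):           # x: (B, channels, T)
            return x + self.net(x)      # residual connection preserves shape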
Example #4
    def __init__(self,
                 vocab,
                 hidden_layers,
                 hidden_units,
                 hidden_nonlinearity,
                 input_dropout=0,
                 hidden_dropout=0):
        super().__init__(vocab)

        self.dense = base.Dense(vocab.vectors.shape[1],
                                len(common.LABELS),
                                output_nonlinearity='sigmoid',
                                hidden_layers=[hidden_units] * hidden_layers,  # list of layer widths
                                hidden_nonlinearity=hidden_nonlinearity,
                                input_dropout=input_dropout,
                                hidden_dropout=hidden_dropout)
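
Every example above delegates its classifier head to `base.Dense`. The repo's implementation is not shown; the function below is a guessed reconstruction that covers the call patterns seen here (int or list `hidden_layers`, a single `dropout` alias, and named nonlinearities).

    import torch.nn as nn

    NONLINEARITIES = {'relu': nn.ReLU, 'tanh': nn.Tanh, 'sigmoid': nn.Sigmoid}

    def Dense(in_features, out_features, output_nonlinearity=None,
              hidden_layers=0, hidden_nonlinearity=None,
              input_dropout=0, hidden_dropout=0, dropout=None):
        if dropout is not None:             # example #1 style: one knob for both
            input_dropout = hidden_dropout = dropout
        widths = (hidden_layers if isinstance(hidden_layers, list)
                  else [in_features] * hidden_layers)
        layers, prev = [], in_features
        if input_dropout:
            layers.append(nn.Dropout(input_dropout))
        for width in widths:
            layers.append(nn.Linear(prev, width))
            layers.append(NONLINEARITIES[hidden_nonlinearity]())
            if hidden_dropout:
                layers.append(nn.Dropout(hidden_dropout))
            prev = width
        layers.append(nn.Linear(prev, out_features))
        if output_nonlinearity:
            layers.append(NONLINEARITIES[output_nonlinearity]())
        return nn.Sequential(*layers)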
Example #5
File: gcnn.py  Project: nptit/kaggle
    def __init__(self, vocab, num_blocks, num_layers, num_channels,
                 kernel_size, dense_layers, dense_dropout):
        super().__init__(vocab)

        embedding_size = vocab.vectors.shape[1]
        # initial gated conv maps the embedding width to the working channel count
        self.glu0 = GLU(embedding_size, num_channels, kernel_size)

        self.num_blocks = num_blocks
        for i in range(1, self.num_blocks + 1):
            block = ResidualBlock(num_channels, num_channels, kernel_size,
                                  num_layers)
            setattr(self, f'block{i}', block)

        self.dense = base.Dense(num_channels,
                                len(common.LABELS),
                                output_nonlinearity='sigmoid',
                                hidden_layers=dense_layers,
                                hidden_nonlinearity='relu',
                                input_dropout=dense_dropout,
                                hidden_dropout=dense_dropout)
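
`GLU` here is presumably a gated linear unit convolution in the style of Dauphin et al. (2017). A minimal assumed version follows: one Conv1d emits twice the target channels, and half of them gate the other half through a sigmoid (`F.glu`). The causal left-padding is a guess.

    import torch.nn as nn
    import torch.nn.functional as F

    class GLU(nn.Module):
        def __init__(self, in_channels, out_channels, kernel_size):
            super().__init__()
            self.pad = kernel_size - 1          # left-pad to keep length T
            self.conv = nn.Conv1d(in_channels, 2 * out_channels, kernel_size)

        def forward(self, x):                   # x: (B, in_channels, T)
            x = F.pad(x, (self.pad, 0))
            return F.glu(self.conv(x), dim=1)   # (B, out_channels, T)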