def __init__(self, embedding_dim, hidden_dim, max_length):
        super(Decoder, self).__init__()

        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.max_length = max_length

        # Packed gate projections; 3 * hidden_dim suggests a GRU-style cell
        # (reset, update, candidate).
        self.input_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        # readable_name is a custom attribute, presumably used to label
        # parameters in logs or visualizations.
        self.input_weights.weight.readable_name = 'Decoder-ih-w'
        self.input_weights.bias.readable_name = 'Decoder-ih-b'

        self.hidden_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        self.hidden_weights.weight.readable_name = 'Decoder-hh-w'
        self.hidden_weights.bias.readable_name = 'Decoder-hh-b'
        # self.input_reduction = nn.Linear(embedding_dim, hidden_dim)
        # Separate attention pointers for the span start ('Head') and end ('Tail').
        self.pointer_s = Attention(hidden_dim, modulename='Head')
        self.pointer_e = Attention(hidden_dim, modulename='Tail')

        # self.encoding_s = torch.nn.Conv1d(hidden_dim, hidden_dim, kernel_size=2, stride=1, padding=0)
        # set_readable_param_names(self.encoding_s, 'Decoder-encoding-s')
        # 1-D convolution over the encoded sequence; the two output channels
        # give a pair of scores per sliding window of 3 positions.
        self.cls = torch.nn.Conv1d(hidden_dim,
                                   2,
                                   kernel_size=3,
                                   stride=1,
                                   padding=0)
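
The packed 3 * hidden_dim projections in this variant look like a manually unrolled GRU cell. As a rough illustration only, here is how such packed gate projections are typically consumed in a forward step; the chunk order (reset, update, candidate) is an assumption, since the original forward() is not shown in these snippets:

import torch
import torch.nn as nn

def gru_step(x, h, input_weights, hidden_weights):
    # Split the packed projections into the three GRU gates.
    gi = input_weights(x)           # (batch, 3 * hidden_dim)
    gh = hidden_weights(h)          # (batch, 3 * hidden_dim)
    i_r, i_z, i_n = gi.chunk(3, dim=1)
    h_r, h_z, h_n = gh.chunk(3, dim=1)

    r = torch.sigmoid(i_r + h_r)    # reset gate
    z = torch.sigmoid(i_z + h_z)    # update gate
    n = torch.tanh(i_n + r * h_n)   # candidate state
    return (1 - z) * n + z * h      # new hidden state

hidden_dim = 256
iw = nn.Linear(hidden_dim, 3 * hidden_dim)
hw = nn.Linear(hidden_dim, 3 * hidden_dim)
x, h = torch.randn(4, hidden_dim), torch.randn(4, hidden_dim)
h = gru_step(x, h, iw, hw)          # (4, 256)
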
Example #2

    def __init__(self,
                 embedding_dim,
                 hidden_dim,
                 max_length):
        super(Decoder, self).__init__()

        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.max_length = max_length

        self.input_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        self.input_weights.weight.readable_name = 'Decoder-ih-w'
        self.input_weights.bias.readable_name = 'Decoder-ih-b'

        self.hidden_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        self.hidden_weights.weight.readable_name = 'Decoder-hh-w'
        self.hidden_weights.bias.readable_name = 'Decoder-hh-b'
        # self.input_reduction = nn.Linear(embedding_dim, hidden_dim)
        self.pointer_s = Attention(hidden_dim, modulename='Head')
        self.pointer_e = Attention(hidden_dim, modulename='Tail')

        # Conv1d over stacked pointer features: with kernel_size=hidden_dim the
        # full feature axis collapses to a single score per channel.
        self.cls = torch.nn.Conv1d(2, 2, kernel_size=hidden_dim, stride=1, padding=0)
        self.cls.weight.readable_name = 'Decoder-ScoreConv-w'
        self.cls.bias.readable_name = 'Decoder-ScoreConv-b'
        self.sm = nn.Softmax(dim=1)
        # prev_v records previous pointed encoding features...
        # self.prev_s_enc  = nn.Parameter(torch.zeros(hidden_dim), requires_grad=False)
        self.prev_e_enc = nn.Parameter(torch.zeros(hidden_dim), requires_grad=False)
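
Assuming the forward pass stacks the start- and end-pointer context vectors along the channel axis, Conv1d(2, 2, kernel_size=hidden_dim) collapses the whole feature dimension in a single step and Softmax(dim=1) normalizes the result across the two channels. A minimal shape check under that assumption (the variable names are hypothetical):

import torch
import torch.nn as nn

batch, hidden_dim = 4, 256
cls = nn.Conv1d(2, 2, kernel_size=hidden_dim, stride=1, padding=0)
sm = nn.Softmax(dim=1)

# Hypothetical inputs: context vectors from the two pointers.
ctx_s = torch.randn(batch, hidden_dim)        # start-pointer context
ctx_e = torch.randn(batch, hidden_dim)        # end-pointer context

stacked = torch.stack([ctx_s, ctx_e], dim=1)  # (batch, 2, hidden_dim)
scores = cls(stacked)                         # (batch, 2, 1)
probs = sm(scores)                            # normalized over the 2 channels
print(probs.shape)                            # torch.Size([4, 2, 1])
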
Example #3
    def __init__(self, embedding_dim, hidden_dim, max_length):
        super(Decoder, self).__init__()

        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.max_length = max_length

        # 4 * hidden_dim matches an LSTM-style cell (four gates), unlike the
        # 3-gate GRU projections in the variants above; here input_weights
        # consumes the raw embedding rather than a hidden-sized vector.
        self.input_weights = nn.Linear(embedding_dim, 4 * hidden_dim)
        self.hidden_weights = nn.Linear(hidden_dim, 4 * hidden_dim)

        self.pointer_s = Attention(hidden_dim)
        self.pointer_e = Attention(hidden_dim)

        self.cls = torch.nn.Conv1d(2,
                                   2,
                                   kernel_size=hidden_dim,
                                   stride=1,
                                   padding=0)

        self.sm = nn.Softmax(dim=1)
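
This variant widens the projections to 4 * hidden_dim and feeds the raw embedding into input_weights, which matches a manually unrolled LSTM cell rather than the 3-gate GRU pattern of the variants above. A sketch of the usual consumption pattern; the gate order is an assumption:

import torch
import torch.nn as nn

def lstm_step(x, h, c, input_weights, hidden_weights):
    # Split the packed 4 * hidden_dim projections into the four LSTM
    # gates; the (input, forget, candidate, output) order is assumed.
    gates = input_weights(x) + hidden_weights(h)
    i, f, g, o = gates.chunk(4, dim=1)
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)
    c_next = f * c + i * g              # new cell state
    h_next = o * torch.tanh(c_next)     # new hidden state
    return h_next, c_next

emb_dim, hidden_dim = 128, 256
iw = nn.Linear(emb_dim, 4 * hidden_dim)
hw = nn.Linear(hidden_dim, 4 * hidden_dim)
x = torch.randn(4, emb_dim)
h, c = torch.randn(4, hidden_dim), torch.randn(4, hidden_dim)
h, c = lstm_step(x, h, c, iw, hw)       # each (4, 256)
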
Example #4
    def __init__(self, hidden_dim, max_length):
        super(Decoder, self).__init__()

        self.hidden_dim = hidden_dim
        self.max_length = max_length

        self.input_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        self.input_weights.weight.readable_name = 'Decoder-ih-w'
        self.input_weights.bias.readable_name = 'Decoder-ih-b'

        self.hidden_weights = nn.Linear(hidden_dim, 3 * hidden_dim)
        self.hidden_weights.weight.readable_name = 'Decoder-hh-w'
        self.hidden_weights.bias.readable_name = 'Decoder-hh-b'
        # self.input_reduction = nn.Linear(embedding_dim, hidden_dim)
        # A single attention pointer replaces the separate start/end pointers
        # of the earlier variants.
        self.pointer = Attention(hidden_dim, modulename='pointer')

        self.cls = torch.nn.Conv1d(2,
                                   2,
                                   kernel_size=hidden_dim,
                                   stride=1,
                                   padding=0)
        self.cls.weight.readable_name = 'Decoder-ScoreConv-w'
        self.cls.bias.readable_name = 'Decoder-ScoreConv-b'
        self.sm = nn.Softmax(dim=1)
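
The readable_name tags used throughout these variants are ordinary Python attributes set on the Parameter objects, presumably so that logs or visualizations can show friendly names instead of dotted module paths. A small sketch of how such tags could be read back; the helper name is hypothetical:

import torch.nn as nn

def log_readable_names(module: nn.Module):
    # Hypothetical helper: print the human-readable tag attached to each
    # parameter, falling back to the standard dotted parameter name.
    for name, param in module.named_parameters():
        tag = getattr(param, 'readable_name', name)
        print(f'{tag}: shape={tuple(param.shape)}')

# Usage: log_readable_names(decoder) after constructing a Decoder instance.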