Code example #1
import torch.nn as nn

import module  # project-local RNN wrappers (module.lstm / module.gru); definition not shown


class TransDecoder(nn.Module):
    def __init__(self, args, max_n_spans):
        super(TransDecoder, self).__init__()
        self.hDim = args.hDim
        self.dropout_lstm = args.dropout_lstm
        
        self.relative_adu_info_size = args.relative_adu_info_size
        self.relative_post_info_size = args.relative_post_info_size
        self.relative_position_info_size = self.relative_adu_info_size + self.relative_post_info_size

        self.lstm_type = args.lstm_type

        self.dropout = nn.Dropout(args.dropout)

        # Optionally re-encode the ADU representations with a BiLSTM
        # before argument-component (AC) type classification.
        if self.lstm_type:
            self.ac_shell_rep_size_out = args.ac_shell_rep_size_out
            self.LastBilstm = module.lstm(input_dim=self.ac_shell_rep_size_out,
                                          output_dim=self.hDim,
                                          num_layers=1,
                                          batch_first=True,
                                          dropout=self.dropout_lstm)
            self.reps_for_type_classification = 2 * self.hDim
        else:
            self.reps_for_type_classification = args.ac_shell_rep_size_out

        # AC-type classifier: a single logit per ADU.
        self.AcTypeLayer = nn.Linear(in_features=self.reps_for_type_classification, out_features=1)

        # Size of the ADU representations used for link identification
        # (shared with the type-classification representations above).
        self.type_rep_size = 2 * self.hDim if self.lstm_type else args.ac_shell_rep_size_out
        self.span_pair_size = self.type_rep_size * 3 + self.relative_position_info_size
        # Pairwise link classifier: two logits per candidate span pair.
        self.LinkLayer = nn.Sequential(
            nn.Linear(in_features=self.span_pair_size, out_features=64),
            nn.Dropout(0.2),
            nn.ReLU(),
            nn.Linear(in_features=64, out_features=2),
        )
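
The width `span_pair_size = type_rep_size * 3 + relative_position_info_size` suggests each candidate link is scored from the source representation, the target representation, one interaction term of the same size, and the relative-position features. A minimal sketch of such a pair encoding (the elementwise product as the interaction term and the helper name `build_pair_features` are assumptions, not taken from this code):

import torch

def build_pair_features(src_rep, tgt_rep, rel_pos_info):
    # src_rep, tgt_rep: (batch, type_rep_size)
    # rel_pos_info:     (batch, relative_position_info_size)
    # The elementwise product as the interaction term is an assumption;
    # the constructor above only fixes the total width to
    # type_rep_size * 3 + relative_position_info_size.
    return torch.cat([src_rep, tgt_rep, src_rep * tgt_rep, rel_pos_info], dim=-1)

# logits = decoder.LinkLayer(build_pair_features(src, tgt, rel))  # (batch, 2)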
Code example #2
import torch
import torch.nn as nn

import module  # project-local RNN wrappers (module.lstm / module.gru); definition not shown
# HeterogeneousLayer is defined elsewhere in the project; its nheads/alpha/concat
# arguments suggest a GAT-style multi-head attention layer.


class HeterogeneousGraph(nn.Module):
    def __init__(self, args):
        super(HeterogeneousGraph, self).__init__()

        self.nhid = args.nhid
        self.layers_len = args.graph_layers
        self.layers = nn.ModuleList()
        self.layers.append(
            HeterogeneousLayer(args.nfeat,
                               args.nhid,
                               args.nheads,
                               args.ngraph,
                               dropout=args.dropout,
                               alpha=args.graph_alpha,
                               concat=True,
                               dtype=args.graph))
        for _ in range(args.graph_layers - 1):
            self.layers.append(
                HeterogeneousLayer(args.nhid,
                                   args.nhid,
                                   args.nheads,
                                   args.ngraph,
                                   dropout=args.dropout,
                                   alpha=args.graph_alpha,
                                   concat=True,
                                   dtype=args.graph))
        self.dropout = nn.Dropout(args.dropout)
        self.concat = args.concat

        if args.rnn == 'LSTM':
            self.rnn = module.lstm(args.nhid, args.nhid, bidirectional=False)
        elif args.rnn == 'GRU':
            self.rnn = module.gru(args.nhid, args.nhid, bidirectional=False)
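
Every class in these examples builds its recurrent layers through the project-local `module` wrapper, whose source is not part of this listing. The following is a minimal sketch of what `module.lstm` / `module.gru` might look like, inferred purely from the call sites (the keyword names come from the calls; the `batch_first=True` and `bidirectional=True` defaults are assumptions, e.g. from `LastBilstm` yielding `2 * hDim` features without an explicit `bidirectional` argument):

# module.py - hypothetical reconstruction, not the project's actual code.
import torch.nn as nn

def lstm(input_dim, output_dim, num_layers=1, batch_first=True,
         dropout=0.0, bidirectional=True):
    # output_dim is the per-direction hidden size; a bidirectional layer
    # therefore emits 2 * output_dim features, matching the "2 * hDim"
    # sizes assumed by the callers above.
    return nn.LSTM(input_dim, output_dim, num_layers=num_layers,
                   batch_first=batch_first, dropout=dropout,
                   bidirectional=bidirectional)

def gru(input_dim, output_dim, num_layers=1, batch_first=True,
        dropout=0.0, bidirectional=True):
    # PyTorch applies `dropout` only between stacked layers, so it is a
    # no-op (with a warning) when num_layers == 1.
    return nn.GRU(input_dim, output_dim, num_layers=num_layers,
                  batch_first=batch_first, dropout=dropout,
                  bidirectional=bidirectional)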

class para_encoder(nn.Module):
    def __init__(self, args, bidirectional=True):
        super(para_encoder, self).__init__()
        self.args = args
        # Paragraph-level recurrent encoder over token representations.
        if args.rnn == 'LSTM':
            self.encoder = module.lstm(args.para_in,
                                       args.hDim,
                                       bidirectional=bidirectional)
        elif args.rnn == 'GRU':
            self.encoder = module.gru(args.para_in,
                                      args.hDim,
                                      bidirectional=bidirectional)
        self.init()

class Elmo_encoder(nn.Module):
    def __init__(self, args):
        super(Elmo_encoder, self).__init__()
        ##########################
        # set default attributes #
        ##########################
        self.eDim = args.eDim
        self.hDim = args.hDim
        self.dropout_lstm = args.dropout_lstm
        self.dropout_word = args.dropout_word
        self.dropout_embedding = args.dropout_embedding

        self.args = args
        # Dropout2d acts as word-level dropout here: it can zero out
        # entire word-embedding vectors at once.
        self.word_dropout = nn.Dropout2d(self.dropout_word)
        ################
        # elmo setting #
        ################
        # ELMo representations are fixed at 1024 dimensions; override the
        # configured embedding size accordingly.
        self.eDim = 1024
        args.eDim = 1024

        # Task-specific ELMo scalar mix: one weight per ELMo layer (three
        # layers) plus a global scale, as in Peters et al. (2018).
        self.elmo_task_gamma = nn.Parameter(torch.ones(1))
        self.elmo_task_s = nn.Parameter(torch.ones(3))
        self.elmo_dropout = nn.Dropout(self.dropout_embedding)

        ###########
        # Default #
        ###########
        if args.rnn == 'LSTM':
            self.Bilstm = module.lstm(input_dim=self.eDim,
                                      output_dim=self.hDim,
                                      num_layers=1,
                                      batch_first=True,
                                      dropout=self.dropout_lstm,
                                      bidirectional=True)
        elif args.rnn == 'GRU':
            # Same attribute name regardless of cell type, so downstream
            # code does not need to branch.
            self.Bilstm = module.gru(input_dim=self.eDim,
                                     output_dim=self.hDim,
                                     num_layers=1,
                                     batch_first=True,
                                     dropout=self.dropout_lstm,
                                     bidirectional=True)

        self.init_para()
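
The `elmo_task_gamma` / `elmo_task_s` parameters above match the standard ELMo scalar mix (Peters et al., 2018): a softmax over per-layer weights followed by a global scale. A minimal sketch of how a forward pass might combine the three ELMo layers (the function name and tensor layout are assumptions; the class's actual forward method is not shown here):

import torch
import torch.nn.functional as F

def scalar_mix(elmo_layers, task_s, task_gamma, dropout):
    # elmo_layers: (3, batch, seq_len, 1024) - the three ELMo layer outputs.
    # task_s, task_gamma: the nn.Parameter tensors defined in __init__ above.
    weights = F.softmax(task_s, dim=0)                        # (3,)
    mixed = (weights.view(-1, 1, 1, 1) * elmo_layers).sum(0)  # (batch, seq_len, 1024)
    return dropout(task_gamma * mixed)

# rep = scalar_mix(layers, self.elmo_task_s, self.elmo_task_gamma, self.elmo_dropout)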

class span_encoder(nn.Module):
    def __init__(self, args):
        super(span_encoder, self).__init__()

        ##########################
        # set default attributes #
        ##########################
        self.hDim = args.hDim
        self.dropout_lstm = args.dropout_lstm
        self.max_n_spans = args.max_n_spans_para
        self.position_info_size = args.position_info_size
        self.args = args

        ###############
        # Select LSTM #
        ###############
        self.lstm_ac = args.lstm_ac
        self.lstm_shell = args.lstm_shell
        self.lstm_ac_shell = args.lstm_ac_shell
        self.adu_label = args.adu_label

        # Fixed width of the optional ADU-label features.
        label_size = 32
        self.span_rep_size = args.span_rep_size
        # AC (argument-component) representations: optionally re-encoded
        # with a bidirectional RNN.
        if self.lstm_ac:
            self.ac_rep_size = self.hDim * 2
            if self.adu_label:
                self.span_rep_size += label_size

            if args.rnn == 'LSTM':
                self.AcBilstm = module.lstm(input_dim=self.span_rep_size,
                                            output_dim=self.hDim,
                                            num_layers=1,
                                            batch_first=True,
                                            dropout=self.dropout_lstm,
                                            bidirectional=True)
            elif args.rnn == 'GRU':
                self.AcBilstm = module.gru(input_dim=self.span_rep_size,
                                           output_dim=self.hDim,
                                           num_layers=1,
                                           batch_first=True,
                                           dropout=self.dropout_lstm,
                                           bidirectional=True)
        else:
            self.ac_rep_size = self.span_rep_size

        # AM (argumentative-marker / shell) representations: optionally
        # re-encoded with a bidirectional RNN.
        if self.lstm_shell:
            self.shell_rep_size = self.hDim * 2
            if self.adu_label:
                self.span_rep_size += label_size
            if args.rnn == 'LSTM':
                self.ShellBilstm = module.lstm(input_dim=self.span_rep_size,
                                               output_dim=self.hDim,
                                               num_layers=1,
                                               batch_first=True,
                                               dropout=self.dropout_lstm,
                                               bidirectional=True)
            elif args.rnn == 'GRU':
                self.ShellBilstm = module.gru(input_dim=self.span_rep_size,
                                              output_dim=self.hDim,
                                              num_layers=1,
                                              batch_first=True,
                                              dropout=self.dropout_lstm,
                                              bidirectional=True)
        else:
            self.shell_rep_size = self.span_rep_size

        # Size of the ADU representation: AC rep + shell rep + two
        # position-information vectors (and optionally the ADU label).
        n_ac_shell_lstm_layers = 1
        self.ac_shell_rep_size_in = (self.ac_rep_size + self.shell_rep_size
                                     + self.position_info_size * 2)
        if self.adu_label:
            self.ac_shell_rep_size_in += label_size
        if args.rnn == 'LSTM':
            self.AcShellBilstm = module.lstm(
                input_dim=self.ac_shell_rep_size_in,
                output_dim=self.hDim,
                num_layers=n_ac_shell_lstm_layers,
                batch_first=True,
                dropout=self.dropout_lstm,
                bidirectional=True)
        elif args.rnn == 'GRU':
            self.AcShellBilstm = module.gru(
                input_dim=self.ac_shell_rep_size_in,
                output_dim=self.hDim,
                num_layers=n_ac_shell_lstm_layers,
                batch_first=True,
                dropout=self.dropout_lstm,
                bidirectional=True)
        args.ac_shell_rep_size_out = self.hDim * 2

        self.init_para()
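
`ac_shell_rep_size_in` implies that each ADU input to `AcShellBilstm` concatenates its AC representation, its shell representation, and two position-information vectors, plus ADU-label features when `adu_label` is set. A minimal sketch of that concatenation (the helper name and shapes are assumptions; the class's actual forward pass is not shown here):

import torch

def build_adu_inputs(ac_reps, shell_reps, position_info, label_emb=None):
    # ac_reps:       (batch, n_spans, ac_rep_size)
    # shell_reps:    (batch, n_spans, shell_rep_size)
    # position_info: (batch, n_spans, position_info_size * 2)
    # label_emb:     (batch, n_spans, 32) when adu_label is set, else None
    parts = [ac_reps, shell_reps, position_info]
    if label_emb is not None:
        parts.append(label_emb)
    return torch.cat(parts, dim=-1)  # (batch, n_spans, ac_shell_rep_size_in)

# adu_out, _ = encoder.AcShellBilstm(build_adu_inputs(ac, shell, pos))
# adu_out: (batch, n_spans, 2 * hDim) == args.ac_shell_rep_size_out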