Example #1
    def __init__(self, opts, vocab, char_vocab, label_vocab):
        super(Char_CNN, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.char_embed_dim = opts.char_embed_size
        self.word_num = vocab.m_size
        self.char_num = char_vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.char_string2id = char_vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.stride = opts.stride
        self.kernel_size = opts.kernel_size
        self.kernel_num = opts.kernel_num
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout

        self.use_cuda = opts.use_cuda

        self.word_embeddings = nn.Embedding(self.word_num, self.embed_dim)
        self.char_embeddings = nn.Embedding(self.char_num, self.char_embed_dim)

        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.string2id)
            self.word_embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.word_embeddings.weight.data,
                             -self.embed_uniform_init, self.embed_uniform_init)
        nn.init.uniform_(self.char_embeddings.weight.data,
                         -self.embed_uniform_init, self.embed_uniform_init)

        # per-token width: word embedding concatenated with the pooled
        # char-CNN features (kernel_num features per kernel size)
        word_char_embed_dim = self.embed_dim + len(
            self.kernel_size) * self.kernel_num

        self.word_char_convs = nn.ModuleList([
            nn.Conv2d(1,
                      self.kernel_num, (K, word_char_embed_dim),
                      stride=self.stride,
                      padding=(K // 2, 0)) for K in self.kernel_size
        ])

        self.char_convs = nn.ModuleList([
            nn.Conv2d(1,
                      self.kernel_num, (K, self.char_embed_dim),
                      stride=self.stride,
                      padding=(K // 2, 0)) for K in self.kernel_size
        ])

        infea = len(self.kernel_size) * self.kernel_num
        self.linear1 = nn.Linear(infea, infea // 2)
        self.linear2 = nn.Linear(infea // 2, self.label_num)

        # the stored dropout probabilities are replaced by the actual modules
        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
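
For orientation, a minimal forward-pass sketch consistent with the modules defined above (an assumption about the wiring, not the repository's actual forward; it presumes `import torch` and `import torch.nn.functional as F` at module level). Char ids are assumed to arrive as (batch, seq_len, word_len); per-word char-CNN features are max-pooled and concatenated with the word embeddings before the word-level convolutions:

    def forward(self, word_ids, char_ids):
        # word_ids: (batch, seq_len); char_ids: (batch, seq_len, word_len)
        batch, seq_len, word_len = char_ids.size()
        word_emb = self.embed_dropout(self.word_embeddings(word_ids))

        # char CNN per word: input (batch * seq_len, 1, word_len, char_embed_dim)
        char_emb = self.char_embeddings(char_ids.view(-1, word_len)).unsqueeze(1)
        char_feats = [F.relu(conv(char_emb)).squeeze(3) for conv in self.char_convs]
        char_feats = [F.max_pool1d(f, f.size(2)).squeeze(2) for f in char_feats]
        char_feats = torch.cat(char_feats, 1).view(batch, seq_len, -1)

        # word embedding + pooled char features -> word-level CNN
        x = torch.cat([word_emb, char_feats], 2).unsqueeze(1)
        feats = [F.relu(conv(x)).squeeze(3) for conv in self.word_char_convs]
        feats = [F.max_pool1d(f, f.size(2)).squeeze(2) for f in feats]
        out = self.fc_dropout(torch.cat(feats, 1))
        return self.linear2(F.relu(self.linear1(out)))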
Example #2

    def __init__(self, opts, vocab, label_vocab, rel_vocab):
        super(biChildSumTreeLSTM, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.rel_num = rel_vocab.m_size
        self.dropout = opts.dropout
        self.hidden_size = opts.hidden_size
        self.hidden_num = opts.hidden_num
        self.bidirectional = opts.bidirectional
        self.use_cuda = opts.use_cuda
        self.debug = False

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        self.dropout = nn.Dropout(self.dropout)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)

        self.dt_tree = DTTreeLSTM(self.embed_dim, self.hidden_size, opts.dropout)
        self.td_tree = TDTreeLSTM(self.embed_dim, self.hidden_size, opts.dropout)

        self.linear = nn.Linear(self.hidden_size * 2, self.label_num)
Example #3

    def __init__(self, opts, vocab, label_vocab):
        super(Pooling, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.embeddings.weight.data, -self.embed_uniform_init, self.embed_uniform_init)

        self.linear1 = nn.Linear(self.embed_dim, self.embed_dim // 2)
        self.linear2 = nn.Linear(self.embed_dim // 2, self.label_num)

        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
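
A minimal forward sketch under the assumption that `Pooling` averages the embedded sequence (the class name suggests this, but the repository's forward may pool differently; `F` is `torch.nn.functional`):

    def forward(self, x):
        # x: (batch, seq_len) word ids
        emb = self.embed_dropout(self.embeddings(x))  # (batch, seq_len, embed_dim)
        pooled = emb.mean(dim=1)                      # average over the sequence
        out = self.fc_dropout(F.relu(self.linear1(pooled)))
        return self.linear2(out)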
Example #4
    def __init__(self, opts, vocab, label_vocab):
        super(LSTM_CNN, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout

        # CNN
        self.stride = opts.stride
        self.kernel_size = opts.kernel_size
        self.kernel_num = opts.kernel_num

        # RNN
        self.hidden_num = opts.hidden_num
        self.hidden_size = opts.hidden_size
        self.hidden_dropout = opts.hidden_dropout
        self.bidirectional = opts.bidirectional

        self.flag = 2 if self.bidirectional else 1

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.embeddings.weight.data,
                             -self.embed_uniform_init, self.embed_uniform_init)

        self.convs = nn.ModuleList([
            nn.Conv2d(1,
                      self.kernel_num, (K, self.hidden_size * self.flag),
                      stride=self.stride,
                      padding=(K // 2, 0)) for K in self.kernel_size
        ])

        self.lstm = nn.LSTM(self.embed_dim,
                            self.hidden_size,
                            dropout=self.hidden_dropout,
                            num_layers=self.hidden_num,
                            batch_first=True,
                            bidirectional=self.bidirectional)

        in_fea = len(self.kernel_size) * self.kernel_num

        self.linear1 = nn.Linear(in_fea, in_fea // 2)
        self.linear2 = nn.Linear(in_fea // 2, self.label_num)

        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
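
A minimal forward sketch consistent with the layers above: the (bi)LSTM output, hidden_size * flag wide, feeds the convolutions, whose max-pooled features go through the two linear layers (an assumed wiring; `F` is `torch.nn.functional`):

    def forward(self, x):
        # x: (batch, seq_len)
        emb = self.embed_dropout(self.embeddings(x))
        out, _ = self.lstm(emb)  # (batch, seq_len, hidden_size * flag)
        out = out.unsqueeze(1)   # add a channel dim for Conv2d
        feats = [F.relu(conv(out)).squeeze(3) for conv in self.convs]
        feats = [F.max_pool1d(f, f.size(2)).squeeze(2) for f in feats]
        out = self.fc_dropout(torch.cat(feats, 1))
        return self.linear2(F.relu(self.linear1(out)))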
Example #5
    def __init__(self, opts, vocab, label_vocab):
        super(LSTM_TreeLSTM, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout
        self.hidden_size = opts.hidden_size
        self.hidden_num = opts.hidden_num
        self.hidden_dropout = opts.hidden_dropout
        self.bidirectional = opts.bidirectional
        self.flag = 2 if self.bidirectional else 1
        self.use_cuda = opts.use_cuda
        self.debug = False

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)

        self.lstm = nn.LSTM(self.embed_dim,
                            self.hidden_size,
                            num_layers=self.hidden_num,
                            dropout=self.hidden_dropout,
                            batch_first=True,
                            bidirectional=self.bidirectional)

        # build the tree-LSTM gates
        self.ix = nn.Linear(self.embed_dim, self.hidden_size)
        self.ih = nn.Linear(self.hidden_size, self.hidden_size)

        self.fx = nn.Linear(self.embed_dim, self.hidden_size)
        self.fh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ox = nn.Linear(self.embed_dim, self.hidden_size)
        self.oh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ux = nn.Linear(self.embed_dim, self.hidden_size)
        self.uh = nn.Linear(self.hidden_size, self.hidden_size)

        # classifier input: (bi)LSTM features (hidden_size * flag) concatenated
        # with the tree-LSTM state (hidden_size)
        self.linear1 = nn.Linear(self.hidden_size * (self.flag + 1),
                                 self.hidden_size // 2)
        self.linear2 = nn.Linear(self.hidden_size // 2, self.label_num)

        self.hidden_dropout = nn.Dropout(self.hidden_dropout)
        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
Example #6
    def __init__(self, opts, vocab, label_vocab):
        super(ChildSumTreeLSTM, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)
        
        # embedding parameters
        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.str2idx = vocab.str2idx
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        
        # network parameters
        self.fc_dropout = opts.fc_dropout
        self.hidden_size = opts.hidden_size
        self.use_cuda = opts.use_cuda

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.str2idx)
            self.embeddings.weight.data.copy_(embedding)

        # build the tree-LSTM gates, following the notation of the paper
        
        # input unit
        self.ix = nn.Linear(self.embed_dim, self.hidden_size)
        self.ih = nn.Linear(self.hidden_size, self.hidden_size)
        
        # forget unit
        self.fx = nn.Linear(self.embed_dim, self.hidden_size)
        self.fh = nn.Linear(self.hidden_size, self.hidden_size)

        # output unit
        self.ox = nn.Linear(self.embed_dim, self.hidden_size)
        self.oh = nn.Linear(self.hidden_size, self.hidden_size)
        
        # candidate (update) unit
        self.ux = nn.Linear(self.embed_dim, self.hidden_size)
        self.uh = nn.Linear(self.hidden_size, self.hidden_size)

        self.out = nn.Linear(self.hidden_size, self.label_num)

        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)

        # NOTE: the original moved self.loss to the GPU without ever defining
        # it; a criterion must be created first (cross-entropy is an assumption)
        self.loss = nn.CrossEntropyLoss()
        if self.use_cuda:
            self.loss = self.loss.cuda()
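
The eight linear layers above correspond to the child-sum tree-LSTM gating equations (Tai et al., 2015). A sketch of the per-node recurrence they imply, with a hypothetical `node_forward` helper (the repository's tree-traversal code is not shown in this example):

    def node_forward(self, x, child_h, child_c):
        # x: (embed_dim,); child_h, child_c: (num_children, hidden_size)
        h_sum = child_h.sum(dim=0)                      # child-sum of hidden states
        i = torch.sigmoid(self.ix(x) + self.ih(h_sum))  # input gate
        o = torch.sigmoid(self.ox(x) + self.oh(h_sum))  # output gate
        u = torch.tanh(self.ux(x) + self.uh(h_sum))     # candidate cell state
        # one forget gate per child, conditioned on that child's hidden state
        f = torch.sigmoid(self.fx(x).unsqueeze(0) + self.fh(child_h))
        c = i * u + (f * child_c).sum(dim=0)
        h = o * torch.tanh(c)
        return h, c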
Example #7
    def __init__(self, opts, vocab, label_vocab):
        super(CNN, self).__init__()

        random.seed(opts.seed)
        torch.cuda.manual_seed(opts.gpu_seed)

        # embedding parameters
        self.embed_dim = opts.embed_size
        self.vocab_size = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.str2idx = vocab.str2idx
        self.embed_uniform_init = opts.embed_uniform_init
        # network parameters
        self.stride = opts.stride
        self.kernel_size = opts.kernel_size
        self.kernel_num = opts.kernel_num
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout

        # embeddings
        self.embeddings = nn.Embedding(self.vocab_size, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.str2idx)
            self.embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.embeddings.weight.data,
                             -self.embed_uniform_init, self.embed_uniform_init)

        self.convs = nn.ModuleList([
            nn.Conv2d(1,
                      self.kernel_num, (K, self.embed_dim),
                      stride=self.stride,
                      padding=(K // 2, 0)) for K in self.kernel_size
        ])

        # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)

        in_fea = len(self.kernel_size) * self.kernel_num

        # dense layer
        # torch.nn.Linear(in_features, out_features, bias=True)
        self.linear1 = nn.Linear(in_fea, in_fea // 2)
        self.linear2 = nn.Linear(in_fea // 2, self.label_num)

        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
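
A minimal Kim-style forward sketch consistent with the modules above (an assumed wiring; `F` is `torch.nn.functional`): embed, convolve with each kernel size, max-pool over time, concatenate, then classify:

    def forward(self, x):
        # x: (batch, seq_len)
        emb = self.embed_dropout(self.embeddings(x)).unsqueeze(1)  # (batch, 1, seq_len, embed_dim)
        feats = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]
        feats = [F.max_pool1d(f, f.size(2)).squeeze(2) for f in feats]
        out = self.fc_dropout(torch.cat(feats, 1))  # (batch, in_fea)
        return self.linear2(F.relu(self.linear1(out)))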
Example #8
    def __init__(self, opts, vocab, label_vocab):
        super(ChildSumTreeLSTM, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout
        self.hidden_size = opts.hidden_size
        self.use_cuda = opts.use_cuda

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)

        # build the tree-LSTM gates (see the node recurrence sketched in Example #6)
        self.ix = nn.Linear(self.embed_dim, self.hidden_size)
        self.ih = nn.Linear(self.hidden_size, self.hidden_size)

        self.fx = nn.Linear(self.embed_dim, self.hidden_size)
        self.fh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ox = nn.Linear(self.embed_dim, self.hidden_size)
        self.oh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ux = nn.Linear(self.embed_dim, self.hidden_size)
        self.uh = nn.Linear(self.hidden_size, self.hidden_size)

        self.out = nn.Linear(self.hidden_size, self.label_num)

        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)

        # same fix as in Example #6: define the criterion (assumed to be
        # cross-entropy) before moving it to the GPU
        self.loss = nn.CrossEntropyLoss()
        if self.use_cuda:
            self.loss = self.loss.cuda()
Example #9

    def __init__(self, opts, vocab, label_vocab):
        super(Child_Sum_Tree_LSTM, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)
Example #10
    def __init__(self, opts, vocab, label_vocab):
        super(GRU, self).__init__()

        random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        torch.cuda.manual_seed(opts.seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout
        self.hidden_num = opts.hidden_num
        self.hidden_size = opts.hidden_size
        self.hidden_dropout = opts.hidden_dropout
        self.bidirectional = opts.bidirectional

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.embeddings.weight.data, -self.embed_uniform_init, self.embed_uniform_init)

        self.gru = nn.GRU(
            self.embed_dim,
            self.hidden_size,
            dropout=self.hidden_dropout,
            num_layers=self.hidden_num,
            batch_first=True,
            bidirectional=self.bidirectional
        )
        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
        # hidden_size * 2 presumes a bidirectional GRU; a unidirectional GRU
        # would produce hidden_size-wide features here
        self.linear1 = nn.Linear(self.hidden_size * 2, self.hidden_size // 2)
        self.linear2 = nn.Linear(self.hidden_size // 2, self.label_num)
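
A minimal forward sketch; max-pooling over time is an assumption, and the hidden_size * 2 input of linear1 presumes the GRU runs bidirectionally (`F` is `torch.nn.functional`):

    def forward(self, x):
        # x: (batch, seq_len)
        emb = self.embed_dropout(self.embeddings(x))
        out, _ = self.gru(emb)     # (batch, seq_len, hidden_size * 2) when bidirectional
        out = out.transpose(1, 2)  # (batch, hidden_size * 2, seq_len)
        out = F.max_pool1d(out, out.size(2)).squeeze(2)
        out = self.fc_dropout(F.relu(self.linear1(out)))
        return self.linear2(out)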
Example #11
    def __init__(self, opts, vocab, label_vocab):
        super(CNN_TreeLSTM, self).__init__()

        random.seed(opts.seed)
        torch.cuda.manual_seed(opts.gpu_seed)

        self.embed_dim = opts.embed_size
        self.word_num = vocab.m_size
        self.pre_embed_path = opts.pre_embed_path
        self.string2id = vocab.string2id
        self.embed_uniform_init = opts.embed_uniform_init
        self.stride = opts.stride
        self.kernel_size = opts.kernel_size
        self.kernel_num = opts.kernel_num
        self.hidden_size = opts.hidden_size
        self.use_cuda = opts.use_cuda
        self.label_num = label_vocab.m_size
        self.embed_dropout = opts.embed_dropout
        self.fc_dropout = opts.fc_dropout
        self.hidden_dropout = opts.hidden_dropout
        self.debug = True

        self.embeddings = nn.Embedding(self.word_num, self.embed_dim)
        if opts.pre_embed_path != '':
            embedding = Embedding.load_predtrained_emb_zero(
                self.pre_embed_path, self.string2id)
            self.embeddings.weight.data.copy_(embedding)
        else:
            nn.init.uniform_(self.embeddings.weight.data,
                             -self.embed_uniform_init, self.embed_uniform_init)

        self.convs = nn.ModuleList([
            nn.Conv2d(1,
                      self.kernel_num, (K, self.embed_dim),
                      stride=self.stride,
                      padding=(K // 2, 0)) for K in self.kernel_size
        ])

        # pooled CNN features concatenated with the tree-LSTM hidden state
        in_fea = len(self.kernel_size) * self.kernel_num + self.hidden_size

        self.linear1 = nn.Linear(in_fea, in_fea // 4)
        self.linear2 = nn.Linear(in_fea // 4, self.label_num)

        # build the tree-LSTM gates (see the node recurrence sketched in Example #6)
        self.ix = nn.Linear(self.embed_dim, self.hidden_size)
        self.ih = nn.Linear(self.hidden_size, self.hidden_size)

        self.fx = nn.Linear(self.embed_dim, self.hidden_size)
        self.fh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ox = nn.Linear(self.embed_dim, self.hidden_size)
        self.oh = nn.Linear(self.hidden_size, self.hidden_size)

        self.ux = nn.Linear(self.embed_dim, self.hidden_size)
        self.uh = nn.Linear(self.hidden_size, self.hidden_size)

        # self.out = nn.Linear(self.hidden_size, self.label_num)

        #dropout
        self.hidden_dropout = nn.Dropout(self.hidden_dropout)
        self.embed_dropout = nn.Dropout(self.embed_dropout)
        self.fc_dropout = nn.Dropout(self.fc_dropout)
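
The classifier head concatenates the pooled CNN features with the tree-LSTM root state, which is why in_fea above adds hidden_size to the convolution width. A sketch of that final combination, with a hypothetical `classify` helper whose inputs would come from the CNN and tree-LSTM passes shown in the earlier examples:

    def classify(self, conv_feats, root_h):
        # conv_feats: (batch, len(kernel_size) * kernel_num); root_h: (batch, hidden_size)
        out = self.fc_dropout(torch.cat([conv_feats, root_h], dim=1))
        return self.linear2(F.relu(self.linear1(out)))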