    def __init__(self, config, encoder_output_dim, action_dict, ent_dict,
                 tri_dict, arg_dict):
        self.config = config
        self.model = pm.global_collection()
        bi_rnn_dim = encoder_output_dim  # width of the bidirectional encoder output
        lmda_dim = config['lmda_rnn_dim']
        part_ent_dim = config['part_ent_rnn_dim']

        self.lmda_dim = lmda_dim
        self.bi_rnn_dim = bi_rnn_dim

        hidden_input_dim = lmda_dim * 3 + bi_rnn_dim * 2 + config['out_rnn_dim']

        self.hidden_arg = nn.Linear(hidden_input_dim,
                                    config['output_hidden_dim'],
                                    activation='tanh')
        self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))

        hidden_input_dim_co = lmda_dim * 3 + bi_rnn_dim * 2 + config['out_rnn_dim']
        self.hidden_ent_corel = nn.Linear(hidden_input_dim_co,
                                          config['output_hidden_dim'],
                                          activation='tanh')
        self.output_ent_corel = nn.Linear(config['output_hidden_dim'], 2)

        self.position_embed = nn.Embedding(500, 20)  # up to 500 positions, 20-dim each

        # attention input: one token vector plus two position embeddings
        attn_input = self.bi_rnn_dim + 20 * 2
        self.attn_hidden = nn.Linear(attn_input, 80, activation='tanh')
        self.attn_out = nn.Linear(80, 1)
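A minimal usage sketch (not in the source): one token's bi-RNN vector is concatenated with two 20-dim position embeddings and scored by the two layers above. The method name attn_score and the callable style of nn.Embedding / nn.Linear are assumptions; dy is DyNet.

    def attn_score(self, h_t, pos_a, pos_b):
        # h_t: one token's bi-RNN vector; pos_a/pos_b: position indices
        feat = dy.concatenate([h_t,
                               self.position_embed(pos_a),
                               self.position_embed(pos_b)])  # bi_rnn_dim + 40 == attn_input
        return self.attn_out(self.attn_hidden(feat))  # scalar attention logit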
Example #2
    def __init__(self, bi_rnn_dim):
        self.var = None
        self.idx = -1
        self.model = pm.global_collection()
        self.bi_rnn_dim = bi_rnn_dim
        self.lmda_empty_embedding = self.model.add_parameters((bi_rnn_dim,))
        self.lambda_type = LambdaVar.OTHERS
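A plausible accessor (hypothetical, not shown in the source): when no lambda variable is attached, the learned empty embedding stands in.

    def vector(self):
        # hypothetical helper; falls back to the learned guard embedding
        return self.var if self.var is not None else self.lmda_empty_embedding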
Example #3
    def __init__(self, n_hid, model=None):
        if model is None:
            model = pm.global_collection()
        # gain initialized to one, bias to zero (layer-normalization style)
        self.p_g = model.add_parameters(dim=n_hid, init=dy.ConstInitializer(1.0))
        self.p_b = model.add_parameters(dim=n_hid, init=dy.ConstInitializer(0.0))
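The unit gain and zero bias suggest layer normalization. A sketch of the assumed forward pass, using DyNet's built-in dy.layer_norm:

    def __call__(self, x):
        # assumed: standard layer normalization with learned gain and bias
        return dy.layer_norm(x, self.p_g, self.p_b)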
Example #4
    def __init__(self,
                 n_in,
                 n_hidden,
                 n_layer=1,
                 bidirectional=False,
                 lstm_params=None,
                 model=None,
                 dropout_x=0.,
                 dropout_h=0.):
        if model is None:
            model = pm.global_collection()
        self.bidirectional = bidirectional
        self.n_layer = n_layer
        rnn_builder_factory = LSTMCell
        self.fwd_builders = [
            rnn_builder_factory(model, n_in, n_hidden, dropout_x, dropout_h)
        ]
        self.bwd_builders = []  # stays empty for a unidirectional encoder
        if bidirectional:
            self.bwd_builders = [
                rnn_builder_factory(model, n_in, n_hidden, dropout_x, dropout_h)
            ]

        # deeper layers read the previous layer's output (doubled if bidirectional);
        # note they are built with the cell's default (zero) dropout
        hidden_input_dim = n_hidden * 2 if bidirectional else n_hidden
        for _ in range(n_layer - 1):
            self.fwd_builders.append(
                rnn_builder_factory(model, hidden_input_dim, n_hidden))
            if bidirectional:
                self.bwd_builders.append(
                    rnn_builder_factory(model, hidden_input_dim, n_hidden))

        if lstm_params is not None:
            self._init_param(lstm_params)
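Hypothetical construction call (the class name MultiLayerLSTM comes from Example #9; the dimensions are made up): a two-layer BiLSTM over 100-dim inputs.

encoder = MultiLayerLSTM(n_in=100, n_hidden=200, n_layer=2,
                         bidirectional=True, dropout_x=0.3, dropout_h=0.3)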
Example #5
    def __init__(self, input_size, hidden_size, dropout_x=0., dropout_h=0.):
        super(StackLSTM, self).__init__()
        self.hidden_size = hidden_size
        model = pm.global_collection()
        self.cell = LSTMCell(model, input_size, hidden_size, dropout_x,
                             dropout_h)
        self.empty_embedding = model.add_parameters((self.hidden_size,),
                                                    name='stackGuardEmb')
        self.states = []   # one LSTM state per stack element
        self.indices = []  # input positions aligned with states
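A minimal push/pop sketch (assumed; the LSTMCell call signature is a guess) consistent with the states/indices bookkeeping above:

    def push(self, x, idx=-1):
        prev = self.states[-1] if self.states else None
        self.states.append(self.cell(x, prev))  # assumed cell(input, prev_state) signature
        self.indices.append(idx)

    def pop(self):
        self.indices.pop()
        return self.states.pop()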
Example #6
    def __init__(self, config, encoder_output_dim, action_dict, ent_dict,
                 tri_dict, arg_dict):
        self.config = config
        self.model = pm.global_collection()

        self.multi_task = MultiTask(config, encoder_output_dim, action_dict,
                                    ent_dict, tri_dict, arg_dict)
        self.arg_null_id = arg_dict[Vocab.NULL]

        bi_rnn_dim = encoder_output_dim  # width of the bidirectional encoder output
        lmda_dim = config['lmda_rnn_dim']
        part_ent_dim = config['part_ent_rnn_dim']

        self.lmda_dim = lmda_dim
        self.bi_rnn_dim = bi_rnn_dim
        self.lambda_var = nn.LambdaVar(lmda_dim)

        dp_state = config['dp_state']
        dp_state_h = config['dp_state_h']


        self.sigma_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)  # processed stack
        self.delta_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)  # items to be pushed back

        self.part_ent_rnn = nn.StackLSTM(bi_rnn_dim, part_ent_dim, dp_state, dp_state_h)
        # beta, the buffer of unprocessed words, is not modeled with a stack LSTM
        self.actions_rnn = nn.StackLSTM(config['action_embed_dim'], config['action_rnn_dim'], dp_state, dp_state_h)
        self.out_rnn = nn.StackLSTM(bi_rnn_dim, config['out_rnn_dim'], dp_state, dp_state_h)

        self.act_table = nn.Embedding(len(action_dict), config['action_embed_dim'])
        self.ent_table = nn.Embedding(len(ent_dict), config['entity_embed_dim'])
        self.tri_table = nn.Embedding(len(tri_dict), config['trigger_embed_dim'])

        self.act = Actions(action_dict, ent_dict, tri_dict, arg_dict)

        hidden_input_dim = bi_rnn_dim + lmda_dim * 3 + part_ent_dim \
                           + config['action_rnn_dim'] + config['out_rnn_dim']

        self.hidden_linear = nn.Linear(hidden_input_dim, config['output_hidden_dim'], activation='tanh')
        self.output_linear = nn.Linear(config['output_hidden_dim'], len(action_dict))
        entity_embed_dim = config['entity_embed_dim']
        trigger_embed_dim = config['trigger_embed_dim']

        ent_to_lmda_dim = config['part_ent_rnn_dim'] + entity_embed_dim
        self.ent_to_lmda = nn.Linear(ent_to_lmda_dim, lmda_dim, activation='tanh')
        tri_to_lmda_dim = bi_rnn_dim + trigger_embed_dim
        self.tri_to_lmda = nn.Linear(tri_to_lmda_dim, lmda_dim, activation='tanh')

        self.hidden_arg = nn.Linear(lmda_dim * 2 + self.bi_rnn_dim, config['output_hidden_dim'],
                                    activation='tanh')
        self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))
        self.empty_buffer_emb = self.model.add_parameters((bi_rnn_dim,), name='bufferGuardEmb')

        self.event_cons = EventConstraint(ent_dict, tri_dict, arg_dict)

        self.empty_times = 0
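How the parts above might combine at decision time (variable names are assumptions; the widths match hidden_input_dim): the buffer head, the lambda variable, the two stack tops, and the partial-entity, action, and output summaries are concatenated and scored over the action vocabulary.

state = dy.concatenate([buffer_head,                       # bi_rnn_dim
                        lambda_vec, sigma_top, delta_top,  # lmda_dim * 3
                        part_ent_top,                      # part_ent_dim
                        action_top, out_top])              # action_rnn_dim + out_rnn_dim
action_logits = self.output_linear(self.hidden_linear(state))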
Example #7
    def __init__(self,
                 n_vocab,
                 n_dim,
                 init_weight=None,
                 trainable=True,
                 model=None,
                 name='embed'):
        if model is None:
            model = pm.global_collection()
        self.trainable = trainable
        if init_weight is not None:
            # initialize from a pretrained numpy matrix
            self.embed = model.lookup_parameters_from_numpy(init_weight,
                                                            name=name)
        else:
            self.embed = model.add_lookup_parameters((n_vocab, n_dim),
                                                     name=name)
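The assumed lookup method: dy.lookup's update flag keeps frozen tables (e.g. pretrained sentence vectors) out of the gradient.

    def __call__(self, idx):
        # gradients flow into the table only when trainable is True
        return dy.lookup(self.embed, idx, update=self.trainable)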
Example #8
    def __init__(self,
                 n_in,
                 n_out,
                 bias=True,
                 activation='linear',
                 model=None,
                 init_w=None):
        if model is None:
            model = pm.global_collection()

        if init_w is not None:
            self.W = model.parameters_from_numpy(init_w)
        else:
            self.W = model.add_parameters((n_out, n_in),
                                          init='glorot',
                                          name='linearW')
        self.bias = bias
        self.act = activation
        if bias:
            self.b = model.add_parameters((n_out,), init=0, name='linearBias')
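A sketch of the assumed forward pass: an affine map followed by the named activation ('linear' leaves the output untouched).

    def __call__(self, x):
        out = self.W * x
        if self.bias:
            out = out + self.b
        if self.act == 'tanh':
            out = dy.tanh(out)
        elif self.act == 'relu':
            out = dy.rectify(out)
        return out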
Example #9
    def __init__(self,
                 n_words,
                 action_dict,
                 ent_dict,
                 tri_dict,
                 arg_dict,
                 pos_dict,
                 pretrained_vec=None):
        pm.init_param_col()
        self.model = pm.global_collection()
        self.sent_model = dy.Model()
        self.optimizer = AdamTrainer(self.model, alpha=joint_config['init_lr'])
        self.optimizer.set_clip_threshold(joint_config['grad_clipping'])

        if not joint_config['use_pretrain_embed'] and not joint_config['use_sentence_vec']:
            raise AttributeError(
                'At least one of use_pretrain_embed and use_sentence_vec must be set to True')

        if joint_config['use_pretrain_embed']:
            self.word_embed = nn.Embedding(
                n_words,
                joint_config['word_embed_dim'],
                init_weight=pretrained_vec,
                trainable=joint_config['pretrain_embed_tune'])

        if joint_config['use_char_rnn']:
            self.char_embed = nn.Embedding(joint_config['n_chars'],
                                           joint_config['char_embed_dim'],
                                           trainable=True)
            self.char_rnn = nn.MultiLayerLSTM(joint_config['char_embed_dim'],
                                              joint_config['char_rnn_dim'],
                                              bidirectional=True)

        if joint_config['use_pos']:
            self.pos_embed = nn.Embedding(len(pos_dict),
                                          joint_config['pos_embed_dim'],
                                          trainable=True)

        if joint_config['random_word_embed']:
            print('Random_word_embed: True')
            self.word_embed_tune = nn.Embedding(n_words,
                                                joint_config['word_embed_dim'],
                                                trainable=True)
            self.word_linear = nn.Linear(joint_config['word_embed_dim'] * 2,
                                         joint_config['word_embed_dim'],
                                         activation='relu')

        if joint_config['use_sentence_vec']:
            print('Use_sentence_vec (BERT): True')
            self.train_sent_embed = nn.Embedding(train_sent_arr.shape[0],
                                                 sent_vec_dim,
                                                 init_weight=train_sent_arr,
                                                 trainable=False,
                                                 name='trainSentEmbed',
                                                 model=self.sent_model)

            self.dev_sent_embed = nn.Embedding(dev_sent_arr.shape[0],
                                               sent_vec_dim,
                                               init_weight=dev_sent_arr,
                                               trainable=False,
                                               name='devSentEmbed',
                                               model=self.sent_model)

            self.test_sent_embed = nn.Embedding(test_sent_arr.shape[0],
                                                sent_vec_dim,
                                                init_weight=test_sent_arr,
                                                trainable=False,
                                                name='testSentEmbed',
                                                model=self.sent_model)

            if joint_config['sent_vec_project'] > 0:
                print('Sentence_vec project to',
                      joint_config['sent_vec_project'])
                self.sent_project = nn.Linear(
                    sent_vec_dim,
                    joint_config['sent_vec_project'],
                    activation=joint_config['sent_vec_project_activation'])

        rnn_input = 0  # accumulate encoder input width from the enabled features
        if joint_config['use_pretrain_embed']:
            rnn_input += joint_config['word_embed_dim']
            print('use_pretrain_embed:', joint_config['use_pretrain_embed'])

        if joint_config['use_sentence_vec'] and not joint_config['cat_sent_after_rnn']:
            rnn_input += sent_vec_dim
            print('use_sentence_vec:', joint_config['use_sentence_vec'])

        if joint_config['use_pos']:
            rnn_input += joint_config['pos_embed_dim']
            print('use_pos:', joint_config['use_pos'])

        if joint_config['use_char_rnn']:
            rnn_input += joint_config['char_rnn_dim'] * 2
            print('use_char_rnn:', joint_config['use_char_rnn'])

        if joint_config['use_rnn_encoder']:
            self.encoder = nn.MultiLayerLSTM(
                rnn_input,
                joint_config['rnn_dim'],
                n_layer=joint_config['encoder_layer'],
                bidirectional=True,
                dropout_x=joint_config['dp_state'],
                dropout_h=joint_config['dp_state_h'])

        self.encoder_output_dim = 0
        if joint_config['use_rnn_encoder']:
            self.encoder_output_dim += joint_config['rnn_dim'] * 2

        elif joint_config['use_pretrain_embed']:
            self.encoder_output_dim += joint_config['word_embed_dim']
            if joint_config['use_pos']:
                self.encoder_output_dim += joint_config['pos_embed_dim']

        if joint_config['cat_sent_after_rnn'] and joint_config['use_sentence_vec']:
            self.encoder_output_dim += sent_vec_dim

        if joint_config['encoder_project'] > 0:
            self.encoder_project = nn.Linear(self.encoder_output_dim,
                                             joint_config['encoder_project'])
            self.encoder_output_dim = joint_config['encoder_project']

        # shift reduce parser
        self.shift_reduce = ShiftReduce(joint_config, self.encoder_output_dim,
                                        action_dict, ent_dict, tri_dict,
                                        arg_dict)
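A worked check of the encoder-width bookkeeping above, with made-up config values:

rnn_dim, sent_vec_dim, encoder_project = 200, 768, 500  # hypothetical values
enc = rnn_dim * 2          # BiLSTM encoder output: 400
enc += sent_vec_dim        # cat_sent_after_rnn: 1168
if encoder_project > 0:    # projection overrides the final width
    enc = encoder_project
assert enc == 500          # width handed to ShiftReduce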