Example 1
    def __init__(self, config, encoder_output_dim , action_dict, ent_dict, tri_dict, arg_dict):
        """Set up the shift-reduce event parser: stack LSTMs, label embeddings,
        and the action / argument classifiers.

        :param config: hyper-parameter dict (rnn dims, dropout rates, ...)
        :param encoder_output_dim: width of each encoder (buffer) vector
        :param action_dict: vocabulary of parser actions
        :param ent_dict: vocabulary of entity labels
        :param tri_dict: vocabulary of trigger labels
        :param arg_dict: vocabulary of argument roles

        NOTE(review): parameters are registered on the global collection in
        the order written below; reordering statements may break loading of
        previously saved models.
        """
        self.config = config
        self.model = pm.global_collection()

        # Shared multi-task component built over the same vocabularies.
        self.multi_task = MultiTask(config, encoder_output_dim , action_dict, ent_dict, tri_dict, arg_dict)
        # Index of the NULL argument role (the "no relation" label).
        self.arg_null_id = arg_dict[Vocab.NULL]

        bi_rnn_dim = encoder_output_dim  # config['rnn_dim'] * 2 #+ config['edge_embed_dim']
        lmda_dim = config['lmda_rnn_dim']
        part_ent_dim = config['part_ent_rnn_dim']

        self.lmda_dim = lmda_dim
        self.bi_rnn_dim = bi_rnn_dim
        # Learned lambda-slot representation of size lmda_dim.
        self.lambda_var = nn.LambdaVar(lmda_dim)

        dp_state = config['dp_state']  # LSTM state dropout (x)
        dp_state_h = config['dp_state_h']  # LSTM state dropout (h)


        self.sigma_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)  # stack
        self.delta_rnn = nn.StackLSTM(lmda_dim, lmda_dim, dp_state, dp_state_h)   # will be pushed back

        # Stack LSTM over partially-built entity spans.
        self.part_ent_rnn = nn.StackLSTM(bi_rnn_dim, part_ent_dim, dp_state, dp_state_h)
        #self.beta = []  # buffer, unprocessed words
        # Histories of taken actions and of completed (output) items.
        self.actions_rnn = nn.StackLSTM(config['action_embed_dim'], config['action_rnn_dim'], dp_state, dp_state_h)
        self.out_rnn = nn.StackLSTM(bi_rnn_dim, config['out_rnn_dim'], dp_state, dp_state_h)

        # Label embeddings for actions, entity types, and trigger types.
        self.act_table = nn.Embedding(len(action_dict), config['action_embed_dim'])
        self.ent_table = nn.Embedding(len(ent_dict), config['entity_embed_dim'])
        self.tri_table = nn.Embedding(len(tri_dict), config['trigger_embed_dim'])

        self.act= Actions(action_dict, ent_dict, tri_dict, arg_dict)

        # Parser-state summary fed to the action classifier: one buffer
        # vector, three lambda-sized slots, the partial-entity state, and the
        # action / output history states.
        hidden_input_dim = bi_rnn_dim + lmda_dim * 3 + part_ent_dim \
                           + config['action_rnn_dim'] + config['out_rnn_dim']

        self.hidden_linear = nn.Linear(hidden_input_dim, config['output_hidden_dim'], activation='tanh')
        # Scores over all parser actions.
        self.output_linear = nn.Linear(config['output_hidden_dim'], len(action_dict))
        entity_embed_dim = config['entity_embed_dim']
        trigger_embed_dim = config['trigger_embed_dim']

        # Project an entity (span state + type embedding) into the lambda
        # space; likewise for a trigger (buffer vector + type embedding).
        ent_to_lmda_dim = config['part_ent_rnn_dim'] + entity_embed_dim #+ config['sent_vec_dim'] * 4
        self.ent_to_lmda = nn.Linear(ent_to_lmda_dim, lmda_dim, activation='tanh')
        tri_to_lmda_dim = bi_rnn_dim + trigger_embed_dim #+ config['sent_vec_dim']
        self.tri_to_lmda = nn.Linear(tri_to_lmda_dim, lmda_dim, activation='tanh')

        # Argument-role classifier: input is two lambda-sized vectors plus one
        # buffer-sized vector (presumably trigger + entity states and a
        # context vector — confirm against the caller).
        self.hidden_arg = nn.Linear(lmda_dim * 2 + self.bi_rnn_dim, config['output_hidden_dim'],
                                    activation='tanh')
        self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))
        # Learned stand-in vector used in place of an empty buffer.
        self.empty_buffer_emb = self.model.add_parameters((bi_rnn_dim,), name='bufferGuardEmb')

        # Constraints over (entity, trigger, argument) label combinations.
        self.event_cons = EventConstraint(ent_dict, tri_dict, arg_dict)
        #self.cached_valid_args = self.cache_valid_args(ent_dict, tri_dict)

        # Counter — presumably tracks empty-buffer occurrences; confirm usage.
        self.empty_times = 0
Example 2
    def __init__(self, model, statistics, options):
        """Span-embedding encoder: word / POS / char / external embeddings
        feeding a BiLSTMMinus sentence encoder.

        :param model: parent parameter container passed to super().__init__
        :param statistics: corpus statistics (words, characters, postags)
        :param options: configuration namespace (embedding dims, thresholds,
            lstm sizes, optional external-embedding path)
        """
        super(SpanEmbeddings, self).__init__(model)
        self.options = options

        self.ldims = options.lstm_dims

        # Optional pre-trained external embedding; contributes e_dim to the
        # encoder input width.
        if options.ext_embedding is not None:
            self.ext_embedding = nn.ExternalEmbedding(self,
                                                      options.ext_embedding,
                                                      extra=("*EMPTY*",
                                                             "*START*",
                                                             "*END*"))
            e_dim = self.ext_embedding.dim
            logger.info('Load external embedding. Vector dimensions %d',
                        self.ext_embedding.dim)
        else:
            self.ext_embedding = None
            e_dim = 0

        self.total_dims = options.wembedding_dims + options.pembedding_dims + e_dim
        self.rnn = BiLSTMMinus(self, [self.total_dims] +
                               [self.ldims * 2] * options.span_lstm_layers)

        # With character embeddings enabled and a word-frequency threshold,
        # rare words fall back to a char-BiLSTM composition and only frequent
        # words get their own embedding row.
        if options.cembedding_dims > 0 and options.word_threshold > 1:
            self.char_embedding = nn.Embedding(self,
                                               list(statistics.characters),
                                               options.cembedding_dims)
            self.c_lstm = nn.BiLSTM(
                self, [options.cembedding_dims] +
                [options.wembedding_dims] * options.span_lstm_layers)
            self.freq_words = set(word
                                  for word, count in statistics.words.items()
                                  if count >= options.word_threshold)
            logger.info("Word embedding size: {}".format(len(self.freq_words)))
            self.word_embedding = nn.Embedding(self,
                                               self.freq_words,
                                               options.wembedding_dims,
                                               extra=("*EMPTY*", "*START*",
                                                      "*END*"))
        else:
            self.word_embedding = nn.Embedding(self, list(statistics.words),
                                               options.wembedding_dims)

        if options.pembedding_dims > 0:
            self.pos_embedding = nn.Embedding(self,
                                              list(statistics.postags),
                                              options.pembedding_dims,
                                              extra=("*EMPTY*", "*START*",
                                                     "*END*"))
        else:
            # Fix: keep the attribute defined when POS embeddings are
            # disabled (mirrors SentenceEmbeddings.__init__), so callers can
            # safely test `pos_embedding is None`.
            self.pos_embedding = None
        self.init_special()
Example 3
    def __init__(self, config, encoder_output_dim, action_dict, ent_dict,
                 tri_dict, arg_dict):
        """Argument-role scorer and entity-correlation scorer over encoder
        states, with a small position-aware attention module.

        :param config: hyper-parameter dict
        :param encoder_output_dim: width of each encoder vector
        :param action_dict/ent_dict/tri_dict/arg_dict: label vocabularies
            (only arg_dict is used here, for the output layer size)
        """
        self.config = config
        self.model = pm.global_collection()
        bi_rnn_dim = encoder_output_dim  # config['rnn_dim'] * 2 #+ config['edge_embed_dim']
        lmda_dim = config['lmda_rnn_dim']
        part_ent_dim = config['part_ent_rnn_dim']

        self.lmda_dim = lmda_dim
        self.bi_rnn_dim = bi_rnn_dim

        # Input to the argument-role classifier: three lambda-sized vectors
        # plus two buffer-sized vectors plus the output-history state.
        hidden_input_dim = lmda_dim * 3 + bi_rnn_dim * 2 + config['out_rnn_dim']

        self.hidden_arg = nn.Linear(hidden_input_dim,
                                    config['output_hidden_dim'],
                                    activation='tanh')
        self.output_arg = nn.Linear(config['output_hidden_dim'], len(arg_dict))

        # NOTE(review): identical expression to hidden_input_dim above — kept
        # as a separate variable, presumably so the two heads can be tuned
        # independently; confirm before merging.
        hidden_input_dim_co = lmda_dim * 3 + bi_rnn_dim * 2 + config[
            'out_rnn_dim']
        # Binary entity-correlation (co-reference style) classifier.
        self.hidden_ent_corel = nn.Linear(hidden_input_dim_co,
                                          config['output_hidden_dim'],
                                          activation='tanh')
        self.output_ent_corel = nn.Linear(config['output_hidden_dim'], 2)

        # Position embeddings: up to 500 distinct positions, 20 dims each.
        self.position_embed = nn.Embedding(500, 20)

        # Attention scorer over (encoder vector, two position embeddings).
        attn_input = self.bi_rnn_dim * 1 + 20 * 2
        self.attn_hidden = nn.Linear(attn_input, 80, activation='tanh')
        self.attn_out = nn.Linear(80, 1)
    def __init__(self, model,
                 hrg_statistics,  # type: HRGStatistics
                 options):
        """Scores HRG edges using an edge-name embedding combined with an
        attention mechanism over LSTM states, followed by an MLP.

        :param model: parent parameter container passed to super().__init__
        :param hrg_statistics: corpus statistics with an edge_names Counter
        :param options: configuration namespace (dims, activation, edge_count)
        """
        super(EmbeddingHRGScorer, self).__init__(model)
        self.options = options
        self.activation = nn.activations[options.activation]

        # Keep only the `edge_count` most frequent edge names
        # (most_common yields (edge, count) pairs).
        self.freq_edges = [edge for edge, count in hrg_statistics.edge_names.most_common(self.options.edge_count)]
        self.edge_embedding = nn.Embedding(self, list(self.freq_edges),
                                           options.edge_embedding_dim,
                                           init=dn.IdentityInitializer())

        # MLP: [concatenated LSTM states + edge embedding] -> hidden dims ->
        # a single scalar score.
        dense_dims = [options.lstm_dims * 2 * options.lstm_layers + options.edge_embedding_dim] + options.hrg_mlp_dims + \
                     [1]
        # don't use bias in last transform
        use_bias = [True] * (len(dense_dims) - 2) + [False]

        self.dense_layer = nn.DenseLayers(self, dense_dims, self.activation, use_bias)
        # Attention parameters — shapes match additive (Bahdanau-style)
        # attention: w1 maps the edge embedding and w2 the LSTM state into
        # attention space, v produces the scalar score; confirm in the
        # forward pass.
        self.attention_w1 = self.add_parameters((options.attention_dim,
                                                 options.edge_embedding_dim))
        self.attention_w2 = self.add_parameters((options.attention_dim,
                                                 options.lstm_dims * 2 * options.lstm_layers))
        self.attention_v = self.add_parameters((1, options.attention_dim))
Example 5
 def __init__(self):
     """Toy module: a 3x4 embedding with weights pinned to `em`, plus a
     small RNN layer.

     NOTE(review): `em` is not defined in this fragment — presumably an
     externally supplied weight matrix; confirm before use.
     """
     self.embed = nn.Embedding(3,4)
     # Pin the embedding weight to the constant matrix `em` (non-trainable).
     self.embed.embed_w.const = em
     self.fc3=nn.rnn((20,2))
Example 6
    def __init__(self,
                 n_words,
                 action_dict,
                 ent_dict,
                 tri_dict,
                 arg_dict,
                 pos_dict,
                 pretrained_vec=None):
        """Build the full joint model: input embeddings, optional RNN encoder,
        and the shift-reduce parser on top.

        :param n_words: word-vocabulary size
        :param action_dict: vocabulary of parser actions
        :param ent_dict: vocabulary of entity labels
        :param tri_dict: vocabulary of trigger labels
        :param arg_dict: vocabulary of argument roles
        :param pos_dict: vocabulary of POS tags
        :param pretrained_vec: optional pre-trained word-embedding matrix

        NOTE(review): relies on module-level names `joint_config`,
        `train_sent_arr`, `dev_sent_arr`, `test_sent_arr` and `sent_vec_dim`
        that are not visible in this fragment.
        """
        pm.init_param_col()
        self.model = pm.global_collection()
        # Separate collection for the (frozen) sentence-vector tables, so
        # they are not saved/updated with the main model.
        self.sent_model = dy.Model()
        self.optimizer = AdamTrainer(alpha=joint_config['init_lr'])
        self.optimizer.set_clip_threshold(joint_config['grad_clipping'])

        # The model needs at least one source of word-level input.
        if not joint_config['use_pretrain_embed'] and not joint_config[
                'use_sentence_vec']:
            raise AttributeError(
                'At least one of use_pretrain_embed and use_sentence_vec should set to True'
            )

        if joint_config['use_pretrain_embed']:
            self.word_embed = nn.Embedding(
                n_words,
                joint_config['word_embed_dim'],
                init_weight=pretrained_vec,
                trainable=joint_config['pretrain_embed_tune'])

        # Optional character-level BiLSTM over character embeddings.
        if joint_config['use_char_rnn']:
            self.char_embed = nn.Embedding(joint_config['n_chars'],
                                           joint_config['char_embed_dim'],
                                           trainable=True)
            self.char_rnn = nn.MultiLayerLSTM(joint_config['char_embed_dim'],
                                              joint_config['char_rnn_dim'],
                                              bidirectional=True)

        if joint_config['use_pos']:
            self.pos_embed = nn.Embedding(len(pos_dict),
                                          joint_config['pos_embed_dim'],
                                          trainable=True)

        # Optional second, trainable word table combined with the pre-trained
        # one through a ReLU projection (hence the * 2 input width).
        if joint_config['random_word_embed']:
            print('Random_word_embed: True')
            self.word_embed_tune = nn.Embedding(n_words,
                                                joint_config['word_embed_dim'],
                                                trainable=True)
            self.word_linear = nn.Linear(joint_config['word_embed_dim'] * 2,
                                         joint_config['word_embed_dim'],
                                         activation='relu')

        # Pre-computed sentence vectors (BERT), stored as frozen embedding
        # tables indexed by sentence id.
        if joint_config['use_sentence_vec']:
            print('Use_sentence_vec (BERT): True')
            self.train_sent_embed = nn.Embedding(train_sent_arr.shape[0],
                                                 sent_vec_dim,
                                                 init_weight=train_sent_arr,
                                                 trainable=False,
                                                 name='trainSentEmbed',
                                                 model=self.sent_model)

            # NOTE(review): unlike the train/test tables this one is NOT
            # registered on self.sent_model (no model= argument), so it lands
            # in the default collection — confirm this is intentional.
            self.dev_sent_embed = nn.Embedding(dev_sent_arr.shape[0],
                                               sent_vec_dim,
                                               init_weight=dev_sent_arr,
                                               trainable=False,
                                               name='devSentEmbed')

            self.test_sent_embed = nn.Embedding(test_sent_arr.shape[0],
                                                sent_vec_dim,
                                                init_weight=test_sent_arr,
                                                trainable=False,
                                                name='testSentEmbed',
                                                model=self.sent_model)

            # Optional down-projection of the sentence vectors.
            if joint_config['sent_vec_project'] > 0:
                print('Sentence_vec project to',
                      joint_config['sent_vec_project'])
                self.sent_project = nn.Linear(
                    sent_vec_dim,
                    joint_config['sent_vec_project'],
                    activation=joint_config['sent_vec_project_activation'])

        # Accumulate the encoder input width from every enabled feature.
        rnn_input = 0  # + config['char_rnn_dim'] * 2
        if joint_config['use_pretrain_embed']:
            rnn_input += joint_config['word_embed_dim']
            print('use_pretrain_embed:', joint_config['use_pretrain_embed'])

        # Sentence vectors are concatenated before the RNN unless
        # cat_sent_after_rnn defers them to after encoding.
        if joint_config[
                'use_sentence_vec'] and not joint_config['cat_sent_after_rnn']:
            rnn_input += sent_vec_dim
            print('use_sentence_vec:', joint_config['use_sentence_vec'])

        if joint_config['use_pos']:
            rnn_input += joint_config['pos_embed_dim']
            print('use_pos:', joint_config['use_pos'])

        if joint_config['use_char_rnn']:
            rnn_input += joint_config['char_rnn_dim'] * 2
            print('use_char_rnn:', joint_config['use_char_rnn'])

        if joint_config['use_rnn_encoder']:
            self.encoder = nn.MultiLayerLSTM(
                rnn_input,
                joint_config['rnn_dim'],
                n_layer=joint_config['encoder_layer'],
                bidirectional=True,
                dropout_x=joint_config['dp_state'],
                dropout_h=joint_config['dp_state_h'])

        # Width of the representation handed to the parser: BiLSTM output,
        # or the raw embeddings when no encoder is used.
        self.encoder_output_dim = 0
        if joint_config['use_rnn_encoder']:
            self.encoder_output_dim += joint_config['rnn_dim'] * 2

        elif joint_config['use_pretrain_embed']:
            self.encoder_output_dim += joint_config['word_embed_dim']
            if joint_config['use_pos']:
                self.encoder_output_dim += joint_config['pos_embed_dim']

        if joint_config['cat_sent_after_rnn'] and joint_config[
                'use_sentence_vec']:
            self.encoder_output_dim += sent_vec_dim

        if joint_config['encoder_project'] > 0:
            self.encoder_project = nn.Linear(self.encoder_output_dim,
                                             joint_config['encoder_project'])

        # An explicit projection overrides the accumulated width.
        self.encoder_output_dim = joint_config[
            'encoder_project'] if joint_config[
                'encoder_project'] > 0 else self.encoder_output_dim

        # shift reduce parser
        self.shift_reduce = ShiftReduce(joint_config, self.encoder_output_dim,
                                        action_dict, ent_dict, tri_dict,
                                        arg_dict)
Example 7
    def __init__(self, model, statistics, options):
        """Sentence-embedding encoder: word / POS / supertag / char / external
        embeddings feeding a configurable (optionally highway-wrapped) RNN.

        :param model: parent parameter container passed to super().__init__
        :param statistics: corpus statistics (words, characters, postags,
            supertags, labels)
        :param options: configuration namespace (embedding dims, rnn types,
            thresholds, highway layers)
        """
        super(SentenceEmbeddings, self).__init__(model)
        self.options = options

        self.ldims = options.lstm_dims

        # Optional pre-trained external embedding; contributes e_dim to the
        # encoder input width.
        if options.ext_embedding is not None:
            self.ext_embedding = nn.ExternalEmbedding(self,
                                                      options.ext_embedding)
            e_dim = self.ext_embedding.dim
            logger.info('Load external embedding. Vector dimensions %d',
                        self.ext_embedding.dim)
        else:
            self.ext_embedding = None
            e_dim = 0

        self.total_dims = options.wembedding_dims + options.pembedding_dims + options.supertag_embedding + e_dim

        # Encoder: plain recurrent stack, or highway-wrapped when
        # highway_layers > 0.
        rnn_dims = [self.total_dims] + [self.ldims * 2] * options.lstm_layers
        if self.options.highway_layers <= 0:
            self.rnn = nn.recurrents[options.rnn_type](self, rnn_dims)
        else:
            self.rnn = nn.HighWayRecurrentWrapper(
                self, rnn_dims, self.options.highway_layers,
                nn.recurrent_builders[options.rnn_type])

        # With character embeddings enabled and a word-frequency threshold,
        # rare words fall back to a char-RNN composition and only frequent
        # words get their own embedding row.
        if options.cembedding_dims > 0 and options.word_threshold > 1:
            self.char_embedding = nn.Embedding(self,
                                               list(statistics.characters),
                                               options.cembedding_dims)
            self.c_lstm = nn.recurrents[options.crnn_type](
                self, [options.cembedding_dims] +
                [options.wembedding_dims] * options.lstm_layers)
            self.freq_words = set(word
                                  for word, count in statistics.words.items()
                                  if count >= options.word_threshold)
            logger.info("Word embedding size: {}".format(len(self.freq_words)))
            self.word_embedding = nn.Embedding(self, self.freq_words,
                                               options.wembedding_dims)
        else:
            self.word_embedding = nn.Embedding(self, list(statistics.words),
                                               options.wembedding_dims)

        if options.pembedding_dims > 0:
            self.pos_embedding = nn.Embedding(self, list(statistics.postags),
                                              options.pembedding_dims)
        else:
            self.pos_embedding = None

        # Supertag embeddings, optionally initialised from an external file.
        if options.supertag_embedding > 0:
            self.supertag_embedding = nn.EmbeddingFromDictionary(
                self,
                statistics.supertags,
                options.supertag_embedding,
                external_init=options.ext_supertag_embedding)
        else:
            self.supertag_embedding = None

        # Dependency-label embedding, sized like the POS embedding.
        # NOTE(review): the trailing positional `()` presumably fills the
        # `extra` slot with no extra symbols — confirm against nn.Embedding.
        self.rel_embedding = nn.Embedding(self, list(statistics.labels),
                                          options.pembedding_dims, ())
        # Fixed-seed RNG for reproducibility.
        self.random = random.Random(168)