Example #1
0
    def evaluate_recurrent(self, word_inds, tag_inds, test=False):
        """
        Run a two-layer bidirectional LSTM over one sentence.

        :param word_inds: word indices, one per token
        :param tag_inds: tag indices, aligned with word_inds
        :param test: when True, inter-layer dropout is disabled
        :return: (fwd_out, back_out) — two lists of per-token expressions
                 in sentence order; each entry concatenates the layer-1
                 and layer-2 outputs of that direction
        """

        def transduce(state, inputs, apply_dropout):
            # Feed `inputs` through one RNN direction, collecting the
            # output expression after every step.
            outputs = []
            for x in inputs:
                if apply_dropout:
                    x = pycnn.dropout(x, self.droprate)
                state = state.add_input(x)
                outputs.append(state.output())
            return outputs

        # Per-token input: word embedding concatenated with tag embedding.
        sentence = [
            pycnn.concatenate([
                pycnn.lookup(self.model['word-embed'], w),
                pycnn.lookup(self.model['tag-embed'], t),
            ])
            for (w, t) in zip(word_inds, tag_inds)
        ]

        # Dropout is applied only between the two layers, only in training.
        drop = self.droprate > 0 and not test

        # Layer 1: raw embeddings, no dropout.
        fwd1_out = transduce(self.fwd_lstm1.initial_state(), sentence, False)
        back1_out = transduce(self.back_lstm1.initial_state(),
                              reversed(sentence), False)

        # back1_out is in reversed sentence order; reversing it again
        # aligns it token-by-token with fwd1_out.
        lstm2_input = [
            pycnn.concatenate([f, b])
            for (f, b) in zip(fwd1_out, reversed(back1_out))
        ]

        # Layer 2, with inter-layer dropout when training.
        fwd2_out = transduce(self.fwd_lstm2.initial_state(), lstm2_input, drop)
        back2_out = transduce(self.back_lstm2.initial_state(),
                              reversed(lstm2_input), drop)

        fwd_out = [
            pycnn.concatenate([f1, f2])
            for (f1, f2) in zip(fwd1_out, fwd2_out)
        ]
        # back1_out and back2_out are both in reversed order, so they zip
        # correctly; the final [::-1] restores sentence order for callers.
        back_out = [
            pycnn.concatenate([b1, b2])
            for (b1, b2) in zip(back1_out, back2_out)
        ]

        return fwd_out, back_out[::-1]
Example #2
0
    def _get_char_representation(self, word):
        """
        Encode *word* character-by-character with a forward and a backward
        LSTM and return the concatenation of their final outputs.
        """
        char_vectors = []
        for char in word.text:
            index = self.char_indexer.get_index(char)
            if index is None:
                # Characters unseen at training time are skipped.
                print("Warning: Unexpected char '%s' (word='%s')" % (char, word.text))
                continue
            char_vectors.append(lookup(self.model["char_lookup"], index))

        forward = self.char_builders[0].initial_state()
        backward = self.char_builders[1].initial_state()

        # Forward LSTM consumes natural order; backward LSTM consumes the
        # reverse. Only the final state of each direction is used.
        for vec in char_vectors:
            forward = forward.add_input(vec)
        for vec in reversed(char_vectors):
            backward = backward.add_input(vec)

        return concatenate([forward.output(), backward.output()])
Example #3
0
def build_tagging_graph(word_indices, model, builder):
    """
    Build the computational graph for one sentence (forward pass only).

    :param word_indices: list of word indices
    :param model: current model to access parameters
    :param builder: RNN builder used to create state combinations
    :return: list of per-token output expressions of the forward pass
    """
    pycnn.renew_cg()
    state = builder.initial_state()

    # Retrieve embeddings from the model and perturb them with noise
    # (a simple regularizer).
    embeddings = []
    for w in word_indices:
        emb = pycnn.lookup(model["word_lookup"], w)
        embeddings.append(pycnn.noise(emb, args.noise))

    # Run the forward pass and collect the output at every step.
    return [s.output() for s in state.add_inputs(embeddings)]
Example #4
0
    def _get_char_representation(self, word, use_dropout):
        """
        Encode *word* character-by-character with a forward and a backward
        LSTM and return the concatenation of their final outputs.

        NOTE(review): `use_dropout` is accepted but never read in this
        body — compare `_get_word_embedding(self, word, use_dropout)`,
        which does apply dropout. Confirm whether character-level dropout
        was intended here.
        """
        word_char_vectors = []
        for char in word.text:
            char_index = self.char_indexer.get_index(char)
            if char_index is None:
                # Unknown characters are skipped (with a warning) rather
                # than mapped to a fallback index.
                print "Warning: Unexpected char '%s' (word='%s')" % (char,
                                                                     word.text)
                continue
            char_vector = lookup(self.model["char_lookup"], char_index)
            word_char_vectors.append(char_vector)

        lstm_forward = self.char_builders[0].initial_state()
        lstm_backward = self.char_builders[1].initial_state()

        # Feed the forward LSTM in natural order and the backward LSTM in
        # reverse order, advancing both states in lockstep; only the final
        # output of each direction is kept.
        for char_vector, reverse_char_vector in zip(
                word_char_vectors, reversed(word_char_vectors)):
            lstm_forward = lstm_forward.add_input(char_vector)
            lstm_backward = lstm_backward.add_input(reverse_char_vector)
        return concatenate([lstm_forward.output(), lstm_backward.output()])
Example #5
0
def build_tagging_graph(words, model, builders):
    """
    Build the bidirectional computational graph for one sentence.

    :param words: list of word indices
    :param model: current model to access parameters
    :param builders: (forward, backward) pair of RNN builders
    :return: list of (forward, backward) output-expression pairs, one per
             word, in sentence order
    """
    pycnn.renew_cg()
    # Generator unpacking still requires exactly two builders.
    forward_init, backward_init = (b.initial_state() for b in builders)

    # Retrieve embeddings from the model and add noise as regularization.
    embeddings = [
        pycnn.noise(pycnn.lookup(model["lookup"], w), 0.1) for w in words
    ]

    # Run both directions; the backward pass consumes the reversed input.
    forward_sequence = [s.output() for s in forward_init.add_inputs(embeddings)]
    backward_sequence = [
        s.output() for s in backward_init.add_inputs(reversed(embeddings))
    ]

    # Reversing the backward outputs re-aligns them with sentence order.
    return list(zip(forward_sequence, reversed(backward_sequence)))
Example #6
0
# Output-layer parameters, sized according to the prediction target.
if args.target in ['joint']:
    pOutAge = model.add_parameters("OUT_AGE", (num_labels, MLP_HIDDEN_LAYER_SIZE))
    # NOTE(review): `(num_labels)` is a parenthesized int, not a 1-tuple —
    # confirm add_parameters accepts a plain int for a bias dimension.
    biasOutAge = model.add_parameters("BIAS_OUT_AGE", (num_labels))
elif args.target in ['age', 'both']:
    pOutAge = model.add_parameters("OUT_AGE", (len(age_labels), MLP_HIDDEN_LAYER_SIZE))
    biasOutAge = model.add_parameters("BIAS_OUT_AGE", (len(age_labels)))

if args.target in ['gender', 'both']:
    pOutGender = model.add_parameters("OUT2", (len(gender_labels), MLP_HIDDEN_LAYER_SIZE))
    biasOutGender = model.add_parameters("BIAS_OUT2", (len(gender_labels)))


print("declared variables", file=sys.stderr)

# Debug scaffolding: build a graph for the first training sentence,
# print its embeddings, then exit — everything after sys.exit() is
# unreachable while this path is in place.
pycnn.renew_cg()
new_word_embeddings = [pycnn.lookup(model["word_lookup"], w) for w in train[0][0][0]]
print(new_word_embeddings)

# NOTE(review): the conv1d_narrow result is discarded — presumably a
# quick API experiment; confirm this block is meant to stay.
pycnn.conv1d_narrow(new_word_embeddings, pycnn.cg())

sys.exit()


def build_tagging_graph(word_indices, model, builder):
    """
    build the computational graph
    :param word_indices: list of indices
    :param model: current model to access parameters
    :param builder: builder to create state combinations
    :return: forward and backward sequence
    """
Example #7
0
 def _get_word_embedding(self, word):
     """
     Return the embedding expression for *word*, falling back to the
     unknown-word index when the word is out of vocabulary.
     """
     word_index = self.word_indexer.get_index(word.text.lower())
     # Bug fix: `get_index(...) or self._unk_word_index` also treated a
     # legitimate index of 0 as missing. get_index returns None on a
     # miss (cf. the char-indexer usage), so test for None explicitly.
     if word_index is None:
         word_index = self._unk_word_index
     return lookup(self.model["word_lookup"], word_index)
Example #8
0
 def _get_word_embedding(self, word, use_dropout):
     """
     Return the embedding expression for *word*, falling back to the
     unknown-word index when the word is out of vocabulary.

     :param use_dropout: when True, the word is replaced by UNK with
         probability 0.5 (word-level dropout during training)
     """
     word_index = self.word_indexer.get_index(word.text.lower())
     # Bug fix: `get_index(...) or self._unk_word_index` also treated a
     # legitimate index of 0 as missing. get_index returns None on a
     # miss (cf. the char-indexer usage), so test for None explicitly.
     if word_index is None:
         word_index = self._unk_word_index
     # Word-level dropout: randomly map known words to UNK in training
     # so the model learns a useful UNK representation.
     if use_dropout and random.random() < 0.5:
         word_index = self._unk_word_index
     return lookup(self.model["word_lookup"], word_index)
Example #9
0
 def _get_word_embedding(self, word):
     """
     Return the embedding expression for *word*, falling back to the
     unknown-word index when the word is out of vocabulary.
     """
     word_index = self.word_indexer.get_index(word.text.lower())
     # Bug fix: `get_index(...) or self._unk_word_index` also treated a
     # legitimate index of 0 as missing. get_index returns None on a
     # miss (cf. the char-indexer usage), so test for None explicitly.
     if word_index is None:
         word_index = self._unk_word_index
     return lookup(self.model["word_lookup"], word_index)
Example #10
0
 def _get_word_embedding(self, word, use_dropout):
     """
     Return the embedding expression for *word*, falling back to the
     unknown-word index when the word is out of vocabulary.

     :param use_dropout: when True, the word is replaced by UNK with
         probability 0.5 (word-level dropout during training)
     """
     word_index = self.word_indexer.get_index(word.text.lower())
     # Bug fix: `get_index(...) or self._unk_word_index` also treated a
     # legitimate index of 0 as missing. get_index returns None on a
     # miss (cf. the char-indexer usage), so test for None explicitly.
     if word_index is None:
         word_index = self._unk_word_index
     # Word-level dropout: randomly map known words to UNK in training
     # so the model learns a useful UNK representation.
     if use_dropout and random.random() < 0.5:
         word_index = self._unk_word_index
     return lookup(self.model["word_lookup"], word_index)