Example #1
def fit(words, tags, labels, model, builders):
    """
    compute the joint error of the tag predictions for a sentence
    :param words: list of indices
    :param tags: list of indices
    :param labels: index
    :param model: current model to access parameters
    :param builders: builder to create state combinations
    :return: joint error
    """
    # retrieve model parameters
    if MLP:
        H = pycnn.parameter(pH)
        O = pycnn.parameter(pO)
    else:
        O = pycnn.parameter(pO)

    errs = []
    for (forward_state, backward_state), tag in zip(build_tagging_graph(words, model, builders), tags):
        f_b = pycnn.concatenate([forward_state, backward_state])
        if MLP:
            # TODO: add bias terms
            r_t = O * (pycnn.tanh(H * f_b))
        else:
            r_t = O * f_b
        err = pycnn.pickneglogsoftmax(r_t, tag)
        errs.append(err)

    return pycnn.esum(errs)
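For orientation, here is a minimal sketch of the training loop that would consume the summed loss expression returned by fit. The trainer choice, NUM_EPOCHS and the (words, tags, labels) layout of training_data are assumptions; model, builders and the globals MLP, pH, pO and build_tagging_graph are expected to be defined elsewhere in the original module, as in the example above.

import random
import pycnn

trainer = pycnn.SimpleSGDTrainer(model)          # assumed trainer choice

for epoch in range(NUM_EPOCHS):                  # NUM_EPOCHS is a placeholder
    random.shuffle(training_data)                # training_data layout is assumed
    for words, tags, labels in training_data:
        pycnn.renew_cg()                         # start a fresh computation graph
        loss = fit(words, tags, labels, model, builders)
        loss.value()                             # forward pass
        loss.backward()                          # backward pass
        trainer.update()                         # gradient step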
Example #2
def predict(sent, model, builders):
    """
    predict tags for a sentence
    :param sent: list of word indices
    :param model: current model to access parameters
    :param builders: builders to create state combinations
    :return: list of predicted tags
    """
    if MLP:
        H = pycnn.parameter(pH)
        O = pycnn.parameter(pO)
    else:
        O = pycnn.parameter(pO)

    tags = []
    for forward_state, backward_state in build_tagging_graph(sent, model, builders):
        if MLP:
            r_t = O * (pycnn.tanh(H * pycnn.concatenate([forward_state, backward_state])))
        else:
            r_t = O * pycnn.concatenate([forward_state, backward_state])

        out = pycnn.softmax(r_t)
        chosen = np.argmax(out.npvalue())
        tags.append(vocab_tags.i2w[chosen])

    return tags
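A hedged sketch of how predict could be used to score tagging accuracy on a held-out set; dev_data and its (sent, gold_tags) layout are assumptions, not part of the original example.

import pycnn

good = total = 0
for sent, gold_tags in dev_data:                 # dev_data layout is assumed
    pycnn.renew_cg()                             # new computation graph per sentence
    predicted = predict(sent, model, builders)
    for p, g in zip(predicted, gold_tags):
        total += 1
        if p == g:
            good += 1
print('tag accuracy: %.4f' % (good / float(total)))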
Example #3
        def add_input(self, input_vec):
            """
            Note that this function updates the existing State object!
            """
            # concatenate the new input with the previous hidden state
            x = pycnn.concatenate([input_vec, self.h])

            i = pycnn.logistic(self.W_i * x + self.b_i)  # input gate
            f = pycnn.logistic(self.W_f * x + self.b_f)  # forget gate
            g = pycnn.tanh(self.W_c * x + self.b_c)      # candidate cell state
            o = pycnn.logistic(self.W_o * x + self.b_o)  # output gate

            # gated update of the cell state, then the new hidden state
            c = pycnn.cwise_multiply(f, self.c) + pycnn.cwise_multiply(i, g)
            h = pycnn.cwise_multiply(o, pycnn.tanh(c))

            self.c = c
            self.h = h
            self.outputs.append(h)

            return self
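A brief sketch of how this hand-rolled State object (constructed as in Example #7 below) might be driven over a sequence; the lstm parameter holder and the list of input expressions are assumptions.

import pycnn

pycnn.renew_cg()                       # new computation graph
state = State(lstm)                    # loads c0 / W_* / b_* into the graph (see Example #7)
for x_t in input_exprs:                # input_exprs: pycnn expressions, assumed
    state.add_input(x_t)               # updates state.c and state.h in place
final_h = state.outputs[-1]            # hidden state after the last time step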
Example #4
def attend(model, input_vectors, state):
    w1 = pc.parameter(model['attention_w1'])
    w2 = pc.parameter(model['attention_w2'])
    v = pc.parameter(model['attention_v'])
    attention_weights = []

    # the decoder-state part of the score is the same for every input vector,
    # so compute it once outside the loop
    w2dt = w2 * pc.concatenate(list(state.s()))
    for input_vector in input_vectors:
        # additive (MLP-style) attention score for this encoder vector
        attention_weight = v * pc.tanh(w1 * input_vector + w2dt)
        attention_weights.append(attention_weight)
    # normalize the scores, then return the weighted sum of the input vectors
    attention_weights = pc.softmax(pc.concatenate(attention_weights))
    output_vectors = pc.esum([vector * attention_weight
                              for vector, attention_weight in zip(input_vectors, attention_weights)])
    return output_vectors
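For context, a sketch of a decoder step that calls attend at every time step; dec_builder, the encoded input_vectors, the output_w projection and MAX_OUTPUT_LEN are assumptions, not part of the original example.

import numpy as np
import pycnn as pc

output_w = pc.parameter(model['output_w'])           # assumed output projection parameter
state = dec_builder.initial_state()                  # dec_builder: an assumed decoder LSTMBuilder
predictions = []
for step in range(MAX_OUTPUT_LEN):                   # MAX_OUTPUT_LEN is a placeholder
    context = attend(model, input_vectors, state)    # weighted sum of the encoder vectors
    state = state.add_input(context)                 # feed the context into the decoder
    probs = pc.softmax(output_w * state.output())
    predictions.append(np.argmax(probs.npvalue()))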
Example #5
def attend(model, input_vectors, state):
    w1 = pc.parameter(model['attention_w1'])
    w2 = pc.parameter(model['attention_w2'])
    v = pc.parameter(model['attention_v'])
    attention_weights = []

    w2dt = w2*pc.concatenate(list(state.s()))
    for input_vector in input_vectors:
        attention_weight = v*pc.tanh(w1*input_vector + w2dt)
        attention_weights.append(attention_weight)
    attention_weights = pc.softmax(pc.concatenate(attention_weights))
    output_vectors = pc.esum([vector*attention_weight for vector, attention_weight in zip(input_vectors, attention_weights)])
    return output_vectors
Example #6
def predict(word_indices, model, builder, target):
    """
    predict demographic label(s)
    :param word_indices: list of word indices for a sentence
    :param model: current model to access parameters
    :param builder: builder to create state combinations
    :param target: which label(s) to predict ('age', 'gender', 'both' or 'joint')
    :return: predicted label index, or [age, gender] indices when target == 'both'
    """
    forward_states = build_tagging_graph(word_indices, model, builder)
    final_state = forward_states[-1]

    H = pycnn.parameter(pH)
    bias_H = pycnn.parameter(biasH)
    H2 = pycnn.parameter(pH2)
    bias_H2 = pycnn.parameter(biasH2)

    if target in ['age', 'both', 'joint']:
        O = pycnn.parameter(pOutAge)
        bias_O = pycnn.parameter(biasOutAge)
    elif target == 'gender':
        O = pycnn.parameter(pOutGender)
        bias_O = pycnn.parameter(biasOutGender)

    if target == 'both':
        O2 = pycnn.parameter(pOutGender)
        bias_O2 = pycnn.parameter(biasOutGender)


    if target == 'both':
        # hidden = bias_H2 + pycnn.tanh(H2 * (bias_H + pycnn.tanh(H * final_state)))
        hidden = bias_H + pycnn.tanh(H * final_state)
        r_age = bias_O + (O * hidden)
        r_gender = bias_O2 + (O2 * hidden)

        out_age = pycnn.softmax(r_age)
        out_gender = pycnn.softmax(r_gender)

        return [np.argmax(out_age.npvalue()), np.argmax(out_gender.npvalue())]

    else:
        # r_t = bias_O + (O * (bias_H2 + pycnn.tanh(H2 * (bias_H + pycnn.tanh(H * final_state)))))
        r_t = bias_O + (O * (bias_H + pycnn.tanh(H * final_state)))  # tanh hidden layer, matching the 'both' branch

        out = pycnn.softmax(r_t)
        chosen = np.argmax(out.npvalue())

        return chosen
Example #7
        def __init__(self, lstm):
            self.lstm = lstm

            self.outputs = []

            # the initial cell state is a learned parameter; the initial
            # hidden state is its tanh
            self.c = pycnn.parameter(self.lstm.c0)
            self.h = pycnn.tanh(self.c)

            # load the gate weight matrices and bias vectors into the
            # current computation graph
            self.W_i = pycnn.parameter(self.lstm.W_i)
            self.b_i = pycnn.parameter(self.lstm.b_i)

            self.W_f = pycnn.parameter(self.lstm.W_f)
            self.b_f = pycnn.parameter(self.lstm.b_f)

            self.W_c = pycnn.parameter(self.lstm.W_c)
            self.b_c = pycnn.parameter(self.lstm.b_c)

            self.W_o = pycnn.parameter(self.lstm.W_o)
            self.b_o = pycnn.parameter(self.lstm.b_o)
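To show where the raw parameters loaded above would come from, here is a hedged sketch of the enclosing LSTM parameter holder. The class name, dimensions, the add_parameters(name, dim) signature and the model[name] accessor are assumptions, inferred from the dict-style access in Examples #4 and #5.

import pycnn

class LSTM(object):
    """Sketch of the parameter holder that State.__init__ reads from;
    add_parameters(name, dim) and model[name] access are assumptions."""
    def __init__(self, model, input_dim, hidden_dim):
        gate_dim = (hidden_dim, input_dim + hidden_dim)
        model.add_parameters('c0', (hidden_dim,))
        self.c0 = model['c0']                      # learned initial cell state
        for gate in ('i', 'f', 'c', 'o'):
            model.add_parameters('W_' + gate, gate_dim)
            model.add_parameters('b_' + gate, (hidden_dim,))
            setattr(self, 'W_' + gate, model['W_' + gate])
            setattr(self, 'b_' + gate, model['b_' + gate])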