Code Example #1
File: control.py Project: padenis/attelo
def decode(phrasebook, mode, decoder, attach, relate=None):
    """
    Decode every instance in the attachment table (predicting
    relations too if we have the data/model for it).
    Return the predictions made

    :type attach: DataAndModel
    :type relate: DataAndModel
    """

    # TODO issue #9: check that call to learner can be uniform
    # with 2 parameters (as logistic), as the documentation is
    # inconsistent on this
    if (relate is not None and mode != DecodingMode.post_label):
        prob_distrib = _combine_probs(phrasebook, attach, relate)
    elif is_perceptron_model(attach.model):
        # home-made online models
        prob_distrib = _get_attach_prob_perceptron(phrasebook, attach)
    else:
        # orange-based models
        prob_distrib = _get_attach_prob_orange(phrasebook, attach)
    # print prob_distrib

    # get prediction (input is just prob_distrib)
    # hence the apparent redundancy here
    # TODO: issue #8: PM CHECK if works with nbest decoding
    predicted = decoder.decode(prob_distrib)

    if mode == DecodingMode.post_label:
        predicted = [_add_labels(phrasebook, x, relate) for x in predicted]

    return predicted
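
This decode variant leans on a few names defined elsewhere in attelo: the DataAndModel bundles named in the docstring, the DecodingMode flag, and the _combine_probs / _get_attach_prob_* helpers. As a rough sketch only, assuming DataAndModel is a plain data/model pair and DecodingMode distinguishes joint decoding from attach-then-label decoding, the surrounding definitions could look like the following (the definitions below are illustrative stand-ins, not attelo's actual API):

from collections import namedtuple
from enum import Enum

# Hypothetical stand-in for the container the docstring refers to:
# the instances to decode plus the model trained on them.
DataAndModel = namedtuple("DataAndModel", ["data", "model"])


class DecodingMode(Enum):
    # Joint decoding vs. attach-first-then-label (post_label) decoding.
    joint = 1
    post_label = 2


# Possible call, assuming `phrasebook`, `decoder` and the two bundles
# have been built beforehand:
# predictions = decode(phrasebook, DecodingMode.post_label,
#                      decoder, attach, relate=relate)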
Code Example #2
File: control.py Project: padenis/attelo
def _combine_single_prob(attach, relate, att, rel):
    """return the best relation for a given EDU pair and its
    joint probability with the pair being attached

    helper for _combine_prob

    :rtype (float, string)
    """
    if is_perceptron_model(attach.model):
        if not attach.model.use_prob:
            raise DecoderException("ERROR: Trying to output probabilities "
                                   "while Perceptron parametrized with "
                                   "use_prob=False!")
        p_attach = attach.model.get_scores([att])[0][2]
    else:
        p_attach = _get_inst_attach_orange(attach.model, att)

    rel_prob, best_rel = _get_inst_relate_orange(relate.model, rel)

    return (p_attach * rel_prob, best_rel)
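
The helper multiplies the probability that an EDU pair is attached by the probability of the best relation for that pair, yielding a joint score per candidate edge. A minimal sketch of how the enclosing _combine_probs might walk the paired attachment and relation instances (the loop below is an assumption about its shape, not the project's actual implementation; the real code also pulls the EDU identifiers out of each instance via the phrasebook):

def _combine_probs_sketch(phrasebook, attach, relate):
    # Walk attachment and relation instances in parallel and record,
    # for every EDU pair, the joint probability together with the
    # best-scoring relation label. The phrasebook would be used here
    # to extract the EDU pair from each attachment instance.
    distrib = []
    for att, rel in zip(attach.data, relate.data):
        joint_prob, best_rel = _combine_single_prob(attach, relate,
                                                    att, rel)
        distrib.append((att, rel, joint_prob, best_rel))
    return distrib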
Code Example #3
File: control.py Project: chloebt/attelo
def decode(config, decoder, attach, relate=None):
    """
    Decode every instance in the attachment table (predicting
    relations too if we have the data/model for it).
    Return the predictions made

    TODO: check that call to learner can be uniform with 2 parameters (as
    logistic), as the documentation is inconsistent on this

    :type attach: DataAndModel
    :type relate: DataAndModel
    """
    if relate is not None and not config.post_labelling:
        prob_distrib = _combine_probs(config.phrasebook,
                                      attach, relate)
    elif is_perceptron_model(attach.model):
        # home-made online models
        prob_distrib = _get_attach_prob_perceptron(config, attach)
    else:
        # orange-based models
        prob_distrib = _get_attach_prob_orange(config, attach)
    # print prob_distrib

    # get prediction (input is just prob_distrib)
    # not all decoders support the threshold keyword argument
    # hence the apparent redundancy here
    if config.threshold is not None:
        predicted = decoder(prob_distrib,
                            threshold=config.threshold,
                            use_prob=config.use_prob)
    else:
        predicted = decoder(prob_distrib,
                            use_prob=config.use_prob)

    if config.post_labelling:
        predicted = _add_labels(config.phrasebook, predicted, relate)

    return predicted
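
Unlike the first example, this older variant reads its options from a single config object (phrasebook, threshold, post_labelling, use_prob) and treats the decoder as a plain callable rather than an object with a decode method. A sketch of a configuration bundle carrying just the fields this function touches, under the assumption that attelo's real config object defines more than these (the names below are illustrative):

from collections import namedtuple

# Hypothetical config holding only the attributes read above.
DecoderConfig = namedtuple("DecoderConfig",
                           ["phrasebook", "threshold",
                            "post_labelling", "use_prob"])

# Possible call, assuming `some_decoder` accepts a probability
# distribution plus the optional threshold/use_prob keywords:
# config = DecoderConfig(phrasebook=phrasebook, threshold=None,
#                        post_labelling=False, use_prob=True)
# predictions = decode(config, some_decoder, attach, relate=relate)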