Example #1
# Relies on helpers defined elsewhere in the surrounding code base:
# TaskType, Collater, and calc_metrics are imported by the enclosing module.
def eval_model(model,
               data,
               metric_meta,
               use_cuda=True,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    if use_cuda:
        model.cuda()
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    # Run inference batch by batch, accumulating scores, predictions,
    # gold labels, and example ids.
    for idx, (batch_info, batch_data) in enumerate(data):
        # if idx % 100 == 0:
        #     print("predicting {}".format(idx))
        batch_info, batch_data = Collater.patch_data(use_cuda, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    # For span tasks (SQuAD-style QA), aggregate per-batch outputs into one
    # answer per question id before scoring.
    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    # Metrics can only be computed when gold labels are available.
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
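
A minimal call-site sketch for this variant (assuming `model`, `test_data`, and `metric_meta` come from the surrounding training script; none of these names appear in the snippet above):

import torch

# Hypothetical caller: evaluate on a held-out loader and print the metrics.
metrics, predictions, scores, golds, ids = eval_model(
    model,
    test_data,
    metric_meta,
    use_cuda=torch.cuda.is_available(),  # fall back to CPU when no GPU is present
    with_label=True,
    task_type=TaskType.Classification)
print(metrics)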
Example #2
def eval_model(model,
               data,
               metric_meta,
               device,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    for (batch_info, batch_data) in data:
        # Move each batch onto the target device before running prediction.
        batch_info, batch_data = Collater.patch_data(device, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    return metrics, predictions, scores, golds, ids
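
Unlike Example #1, this variant takes an explicit device object instead of a use_cuda flag. A hedged sketch of how a caller might construct and pass it (standard PyTorch; `model`, `test_data`, and `metric_meta` are again assumed to exist in the caller):

import torch

# Choose the device once and pass it through; Collater.patch_data is then
# responsible for moving each batch onto it.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
metrics, predictions, scores, golds, ids = eval_model(
    model, test_data, metric_meta, device,
    with_label=True, task_type=TaskType.Classification)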
Example #3
def eval_model(model,
               data,
               metric_meta,
               device,
               with_label=True,
               label_mapper=None,
               task_type=TaskType.Classification):
    predictions = []
    golds = []
    scores = []
    ids = []
    metrics = {}
    print("****device={}".format(device))
    for (batch_info, batch_data) in data:
        batch_info, batch_data = Collater.patch_data(device, batch_info,
                                                     batch_data)
        score, pred, gold = model.predict(batch_info, batch_data)
        predictions.extend(pred)
        golds.extend(gold)
        scores.extend(score)
        ids.extend(batch_info['uids'])

    if task_type == TaskType.Span:
        from experiments.squad import squad_utils
        golds = squad_utils.merge_answers(ids, golds)
        predictions, scores = squad_utils.select_answers(
            ids, predictions, scores)
    if with_label:
        metrics = calc_metrics(metric_meta, golds, predictions, scores,
                               label_mapper)
    # Debug dump of the first few examples; assumes the flat `scores` list
    # holds exactly two scores per example (a binary classification head).
    for i in range(min(len(ids), 10)):
        print("{}\t{}\t{}\t{}".format(ids[i], predictions[i], scores[2 * i],
                                      scores[2 * i + 1]))

    #print("score heads={}".format(scores[:10]))
    return metrics, predictions, scores, golds, ids
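
The debug loop in this variant indexes scores[2 * i] and scores[2 * i + 1], which only works when the flat scores list carries exactly two scores per example, i.e. a binary classification head. A tiny self-contained sketch of that assumed layout, with made-up numbers:

# Assumed flat layout for a 2-class task:
# [score_class0_ex0, score_class1_ex0, score_class0_ex1, score_class1_ex1, ...]
scores = [0.9, 0.1, 0.2, 0.8]  # two illustrative examples
pairs = [scores[2 * i:2 * i + 2] for i in range(len(scores) // 2)]
print(pairs)  # [[0.9, 0.1], [0.2, 0.8]]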