Example #1
import time
import logging

from chainercv.utils import apply_to_iterator, ProgressHook

# `report_accuracy` is a helper from the surrounding project (not shown here).


def test(net,
         test_data,
         metric,
         use_gpus,
         calc_weight_count=False,
         extended_log=False):
    tic = time.time()

    # test_data supplies the predictor wrapper class; see the sketch after
    # this example for one possible construction.
    predictor = test_data["predictor_class"](base_model=net)
    if use_gpus:
        predictor.to_gpu()

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    # apply_to_iterator returns lazy iterators; the inputs are not needed
    # for evaluation, so release them to keep memory usage flat.
    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict,
        test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    for label, pred in zip(gt_labels, pred_labels):
        metric.update(label, pred)

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
Example #2
import time
import logging

import numpy as np

from chainercv.utils import apply_to_iterator, ProgressHook

# `Predictor` and `report_accuracy` are helpers from the surrounding
# project (not shown here).


def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    _, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert len(rest_values) == 1
    assert len(out_values) == 1

    # Update the metric in one batched call instead of per-sample updates;
    # see the minimal metric sketch after this example for an update()
    # that accepts this form.
    metric.update(labels=np.array(list(rest_values[0])),
                  preds=np.array(list(out_values[0])))

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
Example #3
import time
import logging

from chainercv.utils import apply_to_iterator, ProgressHook

# `Predictor` and `report_accuracy` are helpers from the surrounding
# project (not shown here).


def test(net, test_data, metric, calc_weight_count=False, extended_log=False):
    """
    Main test routine.

    Parameters
    ----------
    net : Chain
        Model.
    test_data : dict
        Dictionary with the test data iterator and the dataset length.
    metric : EvalMetric
        Metric object instance.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    tic = time.time()

    predictor = Predictor(model=net, transform=None)

    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))

    # apply_to_iterator returns lazy iterators over inputs, outputs and
    # ground-truth values.
    in_values, out_values, rest_values = apply_to_iterator(
        func=predictor,
        iterator=test_data["iterator"],
        hook=ProgressHook(test_data["ds_len"]))
    assert len(rest_values) == 1
    assert len(out_values) == 1
    assert len(in_values) == 1

    # Consume the input iterator in lock-step with the predictions so that
    # each batch can be released as soon as it has been scored.
    for label, pred, _ in zip(rest_values[0], out_values[0], in_values[0]):
        metric.update(label, pred)

    accuracy_msg = report_accuracy(metric=metric, extended_log=extended_log)
    logging.info("Test: {}".format(accuracy_msg))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))