Example #1
File: train_cv.py  Project: xianyubai/fpl
            # Validation loop (the truncated excerpt begins mid-statement; the loop
            # header and the start of batch_array are reconstructed to mirror eval_cv.py).
            for itr, batch in enumerate(valid_iterator):
                batch_array = [
                    convert.concat_examples([x[idx] for x in batch], args.gpu)
                    for idx in data_idxs
                ]
                loss, pred_y, _ = model.predict(
                    tuple(map(Variable, batch_array)))
                valid_eval.update(cuda.to_cpu(loss.data), pred_y, batch)
                write_prediction(prediction_dict["predictions"], batch, pred_y)

            message_str = "Iter {}: train loss {} / ADE {} / FDE {}, valid loss {} / " \
                          "ADE {} / FDE {}, elapsed time: {} (s)"
            logger.info(
                message_str.format(iter_cnt + 1, train_eval("loss"),
                                   train_eval("ade"), train_eval("fde"),
                                   valid_eval("loss"), valid_eval("ade"),
                                   valid_eval("fde"),
                                   time.time() - st))
            train_eval.update_summary(summary, iter_cnt,
                                      ["loss", "ade", "fde"])
            valid_eval.update_summary(summary, iter_cnt,
                                      ["loss", "ade", "fde"])

            predictions = prediction_dict["predictions"]
            pred_list = [[
                pred for vk, v_dict in sorted(predictions.items())
                for fk, f_dict in sorted(v_dict.items())
                for pk, pred in sorted(f_dict.items()) if pred[8] == idx
            ] for idx in range(4)]

            error_rates = [
                np.mean([pred[7] for pred in preds]) for preds in pred_list
            ]
            logger.info("Towards {} / Away {} / Across {} / Other {}".format(
                *error_rates))
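
A minimal, self-contained sketch of the per-direction aggregation at the end of the snippet above. It assumes each stored prediction is a sequence whose index 7 holds a per-sample error and index 8 a direction label (0: Towards, 1: Away, 2: Across, 3: Other), and that predictions is nested as video -> frame -> pedestrian; both assumptions are inferred from the traversal here, not taken from the project.

import numpy as np

# Toy predictions dict with the assumed nesting; only indices 7 (error) and
# 8 (direction label) of each entry matter for the aggregation.
predictions = {
    "video_a": {
        0: {1: [None] * 7 + [0.5, 0], 2: [None] * 7 + [0.3, 1]},
        1: {1: [None] * 7 + [0.2, 2]},
    },
    "video_b": {
        0: {5: [None] * 7 + [0.4, 3], 6: [None] * 7 + [0.1, 0]},
    },
}

# Group entries by direction label, exactly as in the snippet above.
pred_list = [[
    pred for vk, v_dict in sorted(predictions.items())
    for fk, f_dict in sorted(v_dict.items())
    for pk, pred in sorted(f_dict.items()) if pred[8] == idx
] for idx in range(4)]

# Average the error term within each group.
error_rates = [np.mean([pred[7] for pred in preds]) for preds in pred_list]
print("Towards {} / Away {} / Across {} / Other {}".format(*error_rates))
# prints: Towards 0.3 / Away 0.3 / Across 0.2 / Other 0.4
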
Example #2
File: eval_cv.py  Project: xianyubai/fpl
    valid_eval = Evaluator("valid", args)

    logger.info("Evaluation...")
    chainer.config.train = False
    chainer.config.enable_backprop = False

    # Evaluation loop
    for itr, batch in enumerate(valid_iterator):
        batch_array = [convert.concat_examples([x[idx] for x in batch], args.gpu) for idx in data_idxs]
        loss, pred_y, prob = model.predict(tuple(map(Variable, batch_array)))
        valid_eval.update(cuda.to_cpu(loss.data), pred_y, batch)
        write_prediction(prediction_dict["predictions"], batch, pred_y)

    message_str = "Evaluation: valid loss {} / ADE {} / FDE {}"
    logger.info(message_str.format(valid_eval("loss"), valid_eval("ade"), valid_eval("fde")))
    valid_eval.update_summary(summary, -1, ["loss", "ade", "fde"])
    predictions = prediction_dict["predictions"]
    pred_list = [[pred for vk, v_dict in sorted(predictions.items())
                  for fk, f_dict in sorted(v_dict.items())
                  for pk, pred in sorted(f_dict.items()) if pred[8] == idx] for idx in range(4)]
    logger.info([len(x) for x in pred_list])

    error_rates = [np.mean([pred[7] for pred in preds]) for preds in pred_list]
    logger.info("Towards {} / Away {} / Across {} / Other {}".format(*error_rates))

    prediction_path = os.path.join(save_dir, "prediction.json")
    with open(prediction_path, "w") as f:
        json.dump(prediction_dict, f)

    summary.update("finished", 1)
    summary.write()
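
As a usage note, the prediction.json dumped at the end of eval_cv.py can be reloaded for offline analysis. The sketch below re-runs the same per-direction aggregation from the file; the path and the meaning of indices 7 (error) and 8 (direction label) are assumptions, everything else mirrors the snippet above.

import json

import numpy as np

# Path is an assumption: wherever save_dir pointed when eval_cv.py was run.
with open("experiments/run0/prediction.json") as f:
    prediction_dict = json.load(f)

# Re-run the per-direction aggregation offline. json.dump turns the integer
# frame/pedestrian keys into strings, so sorted() orders them lexicographically
# here; the per-group means are unaffected by iteration order.
predictions = prediction_dict["predictions"]
pred_list = [[
    pred for vk, v_dict in sorted(predictions.items())
    for fk, f_dict in sorted(v_dict.items())
    for pk, pred in sorted(f_dict.items()) if pred[8] == idx
] for idx in range(4)]
print([len(x) for x in pred_list])
error_rates = [np.mean([pred[7] for pred in preds]) for preds in pred_list]
print("Towards {} / Away {} / Across {} / Other {}".format(*error_rates))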