Example #1
0
def inference(params):
    """Build a Seq2Seq model, restore the newest checkpoint, and run inference.

    :param params: model/inference hyper-parameter dict passed through to the
        model constructor and to ``inference_model``.
    """
    config_gpu()  # set up the GPU environment

    # Build the model.
    print("创建模型 ...")
    model = Seq2Seq(params=params)

    # Wire up checkpointing so the latest trained weights can be restored.
    print("创建模型保存器")
    ckpt = tf.train.Checkpoint(Seq2Seq=model)
    manager = tf.train.CheckpointManager(
        ckpt, config.checkpoint_path, max_to_keep=3)

    latest = manager.latest_checkpoint
    if latest:
        print("加载最新保存器数据 {} ...".format(latest))
        ckpt.restore(latest)

    # Run prediction.
    print("开始预测 ...")
    inference_model(model, params)
Example #2
0
def train(params):
    """Build a Seq2Seq model, restore the newest checkpoint if present,
    and run training.

    :param params: model/training hyper-parameter dict passed through to the
        model constructor and to ``train_model``.
    """
    config_gpu()  # set up the GPU environment

    # Build the model.
    print("创建模型 ...")
    # NOTE(review): this originally called lowercase `seq2seq(...)`, but every
    # other block in this file constructs `Seq2Seq` and the checkpoint kwarg
    # below is `Seq2Seq=model` — the lowercase name looks like a typo. Confirm
    # no separate `seq2seq` factory exists before relying on this.
    model = Seq2Seq(params=params)

    # Wire up checkpointing; keep only the 3 most recent checkpoints.
    print("创建模型保存器")
    checkpoint = tf.train.Checkpoint(Seq2Seq=model)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint,
                                                    config.checkpoint_path,
                                                    max_to_keep=3)
    if checkpoint_manager.latest_checkpoint:
        print("加载最新保存器数据 {} ...".format(checkpoint_manager.latest_checkpoint))
        checkpoint.restore(checkpoint_manager.latest_checkpoint)
    else:
        print("初始化保存器.")

    # Train the model; the manager is passed through so training can save.
    print("开始训练 ...")
    train_model(model, params, checkpoint_manager)
Example #3
0
def beam_search(params):
    config_gpu()  # 配置GPU
    # 加载数据集、模型
    _, _, test_X = load_data()
    model = Seq2Seq(params)

    start = time.time()
    print('使用集束搜索开始预测...')
    results = []
    dataset, steps_per_epoch = test_batch_generator(params['batch_size'])
    with tqdm(total=steps_per_epoch, position=0, leave=True) as tq:
        for (batch, batch_x) in enumerate(dataset.take(steps_per_epoch)):
            results += beam_decode(model, params, batch_x)
            tq.update(1)

    print('预测完成,耗时{}s\n处理至文件...'.format(time.time() - start))

    def result_proc(text):
        """
        对预测结果做最后处理
        :param text: 单条预测结果
        :return:
        """
        # text = text.lstrip(' ,!。')
        text = text.replace(' ', '')
        text = text.strip()
        if '<end>' in text:
            text = text[:text.index('<end>')]
        return text

    test_csv = pd.read_csv(config.test_set, encoding="UTF-8")
    # 赋值结果
    test_csv['Prediction'] = results
    # 提取ID和预测结果两列
    test_df = test_csv[['QID', 'Prediction']]
    # 结果处理
    test_df['Prediction'] = test_df['Prediction'].apply(result_proc)
    # 保存结果
    test_df.to_csv(config.inference_result_path, index=None, sep=',')
    print('已保存文件至{}'.format(config.inference_result_path))
            self.dataset["annotations"].append({
                "id":
                image_id,
                "image_id":
                image_id,
                "category_id":
                cid,
                "bbox": [x1, y1, x2 - x1, y2 - y1],
                "area": (x2 - x1) * (y2 - y1),
                "score":
                1,
                "iscrowd":
                False
            })


def evaluating(cocoGt, cocoDt):
    """Run the standard COCO bbox evaluation pipeline and print the summary.

    :param cocoGt: ground-truth COCO dataset handle.
    :param cocoDt: detection-results COCO dataset handle.
    """
    evaluator = COCOeval(cocoGt, cocoDt, "bbox")
    # evaluate -> accumulate -> summarize is the canonical COCOeval sequence.
    for step in (evaluator.evaluate, evaluator.accumulate, evaluator.summarize):
        step()


if __name__ == "__main__":
    config_gpu()
    params = build_params()
    result = run_batch(params)
    gt = GestureEval(None, params=params)
    dt = GestureEval(result, params)
    evaluating(gt, dt)