Example #1
0
def inference(opts):
    """Run validation over the full validation set and log accuracy and AUC.

    Builds the inference graph, optionally restores a checkpoint from
    opts['model_path'], registers the host embeddings with the session,
    then drains the validation data while timing each device step.
    Per-step accuracy, throughput and latency are logged; the overall
    AUC and mean accuracy are logged at the end.

    Args:
        opts: dict of run options; reads 'model_path', 'epochs',
            'batch_size', 'replicas' and 'batches_per_step'.
    """
    infer_graph, uid_embedding, mid_embedding, cat_embedding = generic_init_graph(
        opts, False)
    path = opts['model_path']
    if path is not None and os.path.exists(path + ".meta"):
        infer_graph.saver.restore(infer_graph.session, path)
        tf_log.info(f"model {path} restored")
    else:
        tf_log.info(f"Do not restore since no model under path {path}")

    # Loop bound in "i" units; i below advances by batches_per_step each
    # pass, so this counts batches, not device steps.  Left as a float on
    # purpose — the while-comparison still terminates correctly.
    iterations = VALIDATION_DATA_SIZE * opts['epochs'] / (opts['batch_size'] *
                                                          opts['replicas'])

    i = 0
    stored_arr = []  # [probability, label] pairs accumulated for AUC
    tf_log.info(f"iterations: {iterations}")
    accs = []

    # Register the host embeddings with the session.
    with uid_embedding.register(infer_graph.session), mid_embedding.register(
            infer_graph.session), cat_embedding.register(infer_graph.session):
        while i < iterations:
            start = time.time()
            infer_graph.session.run(infer_graph.ops_val)
            prob, target, acc = infer_graph.session.run(infer_graph.outfeed)
            # Wall-clock time for one device step (batches_per_step batches).
            step_time = time.time() - start
            i += opts['batches_per_step']
            accuracy = np.mean(acc)
            accs.append(accuracy)
            # Flatten the outfeed to per-sample rows; column 0 carries the
            # score / label used for AUC.
            prob_1 = prob.reshape(
                [opts['batches_per_step'] * opts['batch_size'], 2])
            prob_1 = prob_1[:, 0].tolist()
            target_1 = target.reshape(
                [opts['batches_per_step'] * opts['batch_size'], 2])
            target_1 = target_1[:, 0].tolist()
            for p, t in zip(prob_1, target_1):
                stored_arr.append([p, t])

            throughput = opts["batch_size"] * opts[
                "batches_per_step"] / step_time
            tf_log.info(
                f"i={i // opts['batches_per_step']}, validation accuracy: {accuracy:.4f}, throughput:{throughput}, latency:{step_time * 1000 / opts['batches_per_step']}"
            )
    # NOTE: a post-loop `total_time = time.time() - start` was removed — it
    # raised NameError when the loop never ran (iterations <= 0) and its
    # result was never used.
    test_auc = calc_auc(stored_arr)
    test_acc = np.mean(accs)
    tf_log.info(f"test_auc={test_auc:.4f} test_acc={test_acc:.4f}")
    infer_graph.session.close()
Example #2
0
def inference(opts):
    """Run validation inference and report accuracy, AUC and throughput.

    Builds the inference graph, optionally restores a checkpoint from
    opts['model_path'], registers the host embeddings with the session,
    then runs `steps` device steps over the validation data.  Per-step
    metrics are logged during the loop; overall AUC/accuracy and — when
    more than one step ran — aggregate throughput/latency are logged at
    the end.  The first step is excluded from the timing totals as
    warm-up.

    Args:
        opts: dict of run options; reads 'model_path', 'epochs',
            'batch_size', 'replicas' and 'batches_per_step'.
    """
    infer, uid_embedding, mid_embedding, cat_embedding = generic_graph(
        opts, False)
    infer.session.run(infer.init)
    infer.session.run(infer.iterator.initializer)

    path = opts['model_path']
    if path is not None and os.path.exists(path + ".meta"):
        infer.saver.restore(infer.session, path)
        tf_log.debug(f"model {path} restored")
    else:
        tf_log.debug(f"Do not restore since no model under path {path}")

    # Number of device steps; i below advances by 1 per step.  Left as a
    # float on purpose — the while-comparison still terminates correctly.
    steps = VALIDATION_DATA_SIZE * opts['epochs'] / opts['batch_size'] / opts[
        "batches_per_step"]

    i = 0
    stored_arr = []  # [probability, label] pairs accumulated for AUC
    tf_log.debug(f"steps: {steps}")
    accs = []
    total_time = 0
    # Register the host embeddings with the session.
    with uid_embedding.register(infer.session), mid_embedding.register(
            infer.session), cat_embedding.register(infer.session):
        while i < steps:
            start = time.time()
            infer.session.run(infer.ops)
            prob, target, acc = infer.session.run(infer.outfeed)
            time_one_iteration = time.time() - start
            # Exclude the first step (compilation / warm-up) from totals.
            if i > 0:
                total_time = total_time + time_one_iteration
            i += 1
            accuracy = np.mean(acc)
            accs.append(accuracy)
            # Flatten the outfeed to per-sample rows; column 0 carries the
            # score / label used for AUC.
            prob_1 = prob.reshape([
                opts['batches_per_step'] * opts['batch_size'],
                2 * opts['replicas']
            ])
            prob_1 = prob_1[:, 0].tolist()
            target_1 = target.reshape([
                opts['batches_per_step'] * opts['batch_size'],
                2 * opts['replicas']
            ])
            target_1 = target_1[:, 0].tolist()
            for p, t in zip(prob_1, target_1):
                stored_arr.append([p, t])
            throughput = opts["batch_size"] * opts[
                "batches_per_step"] / time_one_iteration
            # BUGFIX: the original logged `i // opts['batches_per_step']`
            # (copied from a variant where i advanced by batches_per_step);
            # here i already counts steps, so log it directly.
            tf_log.info(
                f"i={i},validation accuracy: {accuracy}, throughput:{throughput}, latency:{time_one_iteration * 1000 / opts['batches_per_step']}"
            )
    test_auc = calc_auc(stored_arr)
    test_acc = np.mean(accs)
    tf_log.info(f"test_auc={test_auc:.4f} test_acc={test_acc:.4f}")

    infer.session.close()
    if steps > 1:
        # Aggregate stats over the (i - 1) timed steps (warm-up excluded).
        total_recomm_num = opts["batch_size"] * (i -
                                                 1) * opts["batches_per_step"]
        throughput = float(total_recomm_num) / float(total_time)
        latency = float(total_time) * 1000 / float(
            (i - 1) * opts["batches_per_step"])
        tf_log.info(f"Total recommendations: {total_recomm_num:d}")
        tf_log.info(f"Process time in seconds is {total_time:.3f}")
        tf_log.info(f"recommendations/second is {throughput:.3f}")
        tf_log.info(f"latency in miliseconds is {latency:.3f}")