# Example 1
def local_acc(model, all_batches):
    """Sum the per-batch results of ``batch_pre`` over ``all_batches``.

    Wraps ``batch_pre(model, b)`` as a TFF computation over ``BATCH_TYPE``,
    maps it across the sequence of batches, and sums the mapped values.

    Args:
        model: The model value passed through to ``batch_pre``.
        all_batches: A TFF sequence of batches, each of type ``BATCH_TYPE``.

    Returns:
        The ``tff.sequence_sum`` of ``batch_pre`` applied to every batch.
    """
    # The original also built an identical mapped sequence into an unused
    # local (`acc_l`) — dead code that duplicated the tracing work; removed.
    return tff.sequence_sum(
        tff.sequence_map(
            tff.federated_computation(lambda b: batch_pre(model, b),
                                      BATCH_TYPE),
            all_batches))
# Example 2
def local_eval(model, all_batches):
    """Compute the summed loss of ``model`` over a sequence of batches.

    Args:
        model: The model value passed through to ``batch_loss``.
        all_batches: A TFF sequence of batches, each of type ``BATCH_TYPE``.

    Returns:
        The ``tff.sequence_sum`` of ``batch_loss`` over every batch.
    """
    # Wrap the per-batch loss as a TFF computation on BATCH_TYPE values.
    per_batch_loss = tff.federated_computation(
        lambda b: batch_loss(model, b), BATCH_TYPE)
    # Map it over the sequence, then reduce by summation.
    losses = tff.sequence_map(per_batch_loss, all_batches)
    return tff.sequence_sum(losses)
def local_eval(model, all_batchs):
    # Sums per-batch losses; the original (Chinese) comment suggested
    # switching to tff.sequence_average to report a mean loss instead of a
    # total once that is the desired metric.
    return tff.sequence_sum(
        # Map batch_loss over every batch in all_batchs.
        # NOTE(review): the original comment claimed sequence_map computes in
        # parallel while sequence_reduce is sequential — plausible but not
        # verifiable from this file; confirm against the TFF docs.
        tff.sequence_map(
            tff.federated_computation(lambda b: batch_loss(model, b),
                                      BATCH_TYPE), all_batchs))
# Example 4
def local_eval(model, all_batches):
    """Return the total loss of ``model`` across ``all_batches``.

    Args:
        model: The model value passed through to ``batch_loss``.
        all_batches: A TFF sequence of batches, each of type ``BATCH_TYPE``.

    Returns:
        The summed per-batch losses.
    """
    # TODO: replace tff.sequence_sum with tff.sequence_average once that
    # function is implemented (kept from the original intent).
    loss_fn = tff.federated_computation(lambda b: batch_loss(model, b),
                                        BATCH_TYPE)
    return tff.sequence_sum(tff.sequence_map(loss_fn, all_batches))