Example 1
def _evaluate_distributed(self, dataset):
    # Evaluate the compiled metric tensors over a distributed TFDataset.
    predictor = TFPredictor(K.get_session(), self.metrics_tensors,
                            self.model.inputs + self.model.targets,
                            dataset)
    result = predictor.predict()
    # Append a 1.0 counter to every result before summing element-wise,
    # so the last entry of metrics_sum holds the number of results.
    metrics_sum = result.map(lambda x: x + [np.array(1.0)]).reduce(
        lambda a, b: elem_sum(a, b))
    # Divide each accumulated metric by that count to get the average.
    length = len(metrics_sum) - 1
    for i in range(length):
        metrics_sum[i] /= metrics_sum[length]
    return metrics_sum[:length]
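
The reduce step relies on an elem_sum helper that the snippet does not show; a minimal sketch, assuming it adds two equal-length lists of numpy arrays element by element:

def elem_sum(a, b):
    # Assumed helper: pairwise sum of two equal-length lists of arrays,
    # e.g. elem_sum([m1, n1], [m2, n2]) -> [m1 + m2, n1 + n2].
    return [x + y for x, y in zip(a, b)]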
Example 2
import numpy as np
import tensorflow as tf

from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
from zoo import init_nncontext
from zoo.pipeline.api.net import TFDataset, TFPredictor
# lenet is the slim reference model from tensorflow/models (research/slim
# must be on the PYTHONPATH); import paths may vary across versions.
from nets import lenet

slim = tf.contrib.slim


def main():

    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
    image_rdd = sc.parallelize(images_data)
    labels_rdd = sc.parallelize(labels_data)
    rdd = image_rdd.zip(labels_rdd) \
        .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                np.array(rec_tuple[1])])

    dataset = TFDataset.from_rdd(rdd,
                                 names=["features", "labels"],
                                 shapes=[[28, 28, 1], [1]],
                                 types=[tf.float32, tf.int32],
                                 batch_per_thread=20)

    # construct the model from TFDataset
    images, labels = dataset.tensors

    # Drop the trailing singleton dimension so labels match predictions.
    labels = tf.squeeze(labels)

    with slim.arg_scope(lenet.lenet_arg_scope()):
        logits, end_points = lenet.lenet(images,
                                         num_classes=10,
                                         is_training=False)

    # 1 where the predicted class equals the label, 0 otherwise.
    predictions = tf.to_int32(tf.argmax(logits, axis=1))
    correct = tf.expand_dims(tf.to_int32(tf.equal(predictions, labels)),
                             axis=1)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore the trained weights; the argument to restore() is the
        # checkpoint prefix used at save time, not a plain directory.
        saver.restore(sess, "/tmp/lenet/")

        # Build a distributed predictor on the per-sample correctness flags.
        predictor = TFPredictor(sess, [correct])

        # predict() returns an RDD of 0/1 flags; their mean is the accuracy.
        accuracy = predictor.predict().mean()

        print("predict accuracy is %s" % accuracy)
Example 3

import numpy as np

from tensorflow.keras.layers import Dense, Flatten, Input
from tensorflow.keras.models import Model
from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
from zoo import init_nncontext
from zoo.pipeline.api.net import TFDataset, TFPredictor
# Import paths above may vary across Analytics Zoo versions.

# Assumed module-level flag: True runs the distributed RDD evaluation,
# False evaluates locally through the Keras API.
DISTRIBUTED = True


def main(data_num):

    data = Input(shape=[28, 28, 1])

    x = Flatten()(data)
    x = Dense(64, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)

    model = Model(inputs=data, outputs=predictions)

    model.load_weights("/tmp/mnist_keras.h5")

    if DISTRIBUTED:
        # using RDD api to do distributed evaluation
        sc = init_nncontext()
        # get data, pre-process and create TFDataset
        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
        image_rdd = sc.parallelize(images_data[:data_num])
        labels_rdd = sc.parallelize(labels_data[:data_num])
        rdd = image_rdd.zip(labels_rdd) \
            .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD)])

        dataset = TFDataset.from_rdd(rdd,
                                     names=["features"],
                                     shapes=[[28, 28, 1]],
                                     types=[tf.float32],
                                     batch_per_thread=20)
        predictor = TFPredictor.from_keras(model, dataset)

        # Zip the prediction RDD with the labels and compare the argmax of
        # each softmax output against the true label.
        accuracy = predictor.predict().zip(labels_rdd).map(
            lambda x: np.argmax(x[0]) == x[1]).mean()

        print("predict accuracy is %s" % accuracy)

    else:
        # using keras api for local evaluation
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
        images_data = normalizer(images_data, mnist.TRAIN_MEAN,
                                 mnist.TRAIN_STD)
        result = model.evaluate(images_data, labels_data)
        print(model.metrics_names)
        print(result)
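
The function takes a data_num argument but the snippet ships no entry point; a minimal sketch, where the 10000-sample cap (the full MNIST test set) is a hypothetical choice:

if __name__ == "__main__":
    # data_num caps how many test samples are parallelized and scored.
    main(data_num=10000)

Note that the distributed branch zips the prediction RDD with labels_rdd, which in Spark requires the two RDDs to have the same number of partitions and the same number of elements per partition; parallelizing both slices from the same SparkContext, as above, normally satisfies this.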
Example 4
def _predict_distributed(self, x):
    # Run distributed inference: wrap the Keras model in a TFPredictor
    # and evaluate it over the TFDataset x.
    predictor = TFPredictor.from_keras(self.model, x)
    return predictor.predict()
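
For context, a hedged usage sketch of the same call outside the class, assuming model is a Keras Model and dataset is a TFDataset built as in Example 3 (the variable names are illustrative):

# predict() returns an RDD with one prediction array per sample, so the
# output composes with ordinary RDD operations:
pred_rdd = TFPredictor.from_keras(model, dataset).predict()
top_classes = pred_rdd.map(lambda probs: int(np.argmax(probs))).collect()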