# Imports for this snippet; the module paths assume Analytics Zoo's TFPark
# API and BigDL's MNIST utilities, and may need adjusting for your version.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.models import Model
from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
from zoo import init_nncontext
from zoo.tfpark import TFDataset, TFPredictor

# Flag (assumed; not defined in the original snippet) that switches between
# distributed evaluation on an RDD and local evaluation with the Keras API.
DISTRIBUTED = True


def main(data_num):
    # Build the Keras model: flatten the 28x28x1 images, apply two hidden
    # layers, and emit softmax probabilities over the 10 digit classes.
    data = Input(shape=[28, 28, 1])
    x = Flatten()(data)
    x = Dense(64, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=data, outputs=predictions)
    model.load_weights("/tmp/mnist_keras.h5")

    if DISTRIBUTED:
        # Use the RDD API to do distributed evaluation.
        sc = init_nncontext()

        # Get the data, pre-process it, and create a TFDataset.
        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
        image_rdd = sc.parallelize(images_data[:data_num])
        labels_rdd = sc.parallelize(labels_data[:data_num])
        rdd = image_rdd.zip(labels_rdd) \
            .map(lambda rec_tuple: [normalizer(rec_tuple[0],
                                               mnist.TRAIN_MEAN,
                                               mnist.TRAIN_STD)])

        dataset = TFDataset.from_rdd(rdd,
                                     names=["features"],
                                     shapes=[[28, 28, 1]],
                                     types=[tf.float32],
                                     batch_per_thread=20)
        predictor = TFPredictor.from_keras(model, dataset)

        # Compare each prediction's argmax with its label and average
        # the matches across the RDD to get accuracy.
        accuracy = predictor.predict().zip(labels_rdd).map(
            lambda x: np.argmax(x[0]) == x[1]).mean()
        print("predict accuracy is %s" % accuracy)
    else:
        # Use the Keras API for local evaluation.
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])

        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", "test")
        images_data = normalizer(images_data, mnist.TRAIN_MEAN, mnist.TRAIN_STD)
        result = model.evaluate(images_data, labels_data)
        print(model.metrics_names)
        print(result)
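
# A minimal driver for main() above. This entry point is a sketch and not
# part of the original snippet: the argv handling and the default of 10000
# test images are assumptions.
if __name__ == "__main__":
    import sys
    data_num = int(sys.argv[1]) if len(sys.argv) > 1 else 10000
    main(data_num)
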
def _predict_distributed(self, x):
    # x is expected to be a TFDataset; wrap the Keras model in a TFPredictor
    # and run inference across the cluster, returning an RDD of predictions.
    predictor = TFPredictor.from_keras(self.model, x)
    return predictor.predict()
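
# For contrast, a plausible local counterpart to _predict_distributed. This
# is a sketch under the assumption that x is an in-memory NumPy array rather
# than a TFDataset; _predict_local and its batch_size parameter are
# hypothetical names, not the library's actual API.
def _predict_local(self, x, batch_size=32):
    # Fall back to the plain Keras predict() on the driver when the input
    # fits in local memory.
    return self.model.predict(x, batch_size=batch_size)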