def train(self, to_static=False):
    np.random.seed(SEED)
    paddle.seed(SEED)
    paddle.framework.random._manual_program_seed(SEED)
    mnist = MNIST()

    if to_static:
        print("Successfully applied @to_static.")
        build_strategy = paddle.static.BuildStrategy()
        # `enable_inplace` is disabled because this PE pass makes the
        # dy2st training loss unstable.
        build_strategy.enable_inplace = False
        mnist = paddle.jit.to_static(mnist, build_strategy=build_strategy)

    optimizer = paddle.optimizer.Adam(
        learning_rate=0.001, parameters=mnist.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
    mnist, optimizer = paddle.amp.decorate(
        models=mnist, optimizers=optimizer, level='O2', save_dtype='float32')

    loss_data = []
    for epoch in range(self.epoch_num):
        start = time()
        for batch_id, data in enumerate(self.train_reader()):
            dy_x_data = np.array(
                [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
            y_data = np.array(
                [x[1] for x in data]).astype('int64').reshape(-1, 1)

            img = paddle.to_tensor(dy_x_data)
            label = paddle.to_tensor(y_data)
            label.stop_gradient = True

            with paddle.amp.auto_cast(enable=True,
                                      custom_white_list=None,
                                      custom_black_list=None,
                                      level='O2'):
                prediction, acc, avg_loss = mnist(img, label=label)

            scaled = scaler.scale(avg_loss)
            scaled.backward()
            scaler.minimize(optimizer, scaled)

            loss_data.append(avg_loss.numpy()[0])
            # clear gradients before the next step
            mnist.clear_gradients()
            if batch_id % 2 == 0:
                print("Loss at epoch {} step {}: loss: {}, acc: {}, cost: {}"
                      .format(epoch, batch_id, avg_loss.numpy(), acc.numpy(),
                              time() - start))
                start = time()
            if batch_id == 10:
                break
    return loss_data
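A minimal sketch of how the train() method above could be driven to compare dynamic-graph and @to_static losses. The harness class name, epoch count, batch size, and the paddle.batch/paddle.dataset reader are assumptions for illustration; they are not part of the original code.

# Assumed harness; only epoch_num and train_reader are required by train().
import numpy as np
import paddle


class TestAMPMNIST(object):
    def __init__(self):
        self.epoch_num = 1
        # Classic reader API matching the `self.train_reader()` call above;
        # newer Paddle releases may need a DataLoader instead.
        self.train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=64, drop_last=True)

    # bind the module-level train() function defined above as a method
    train = train


if __name__ == "__main__":
    tester = TestAMPMNIST()
    dygraph_loss = tester.train(to_static=False)
    static_loss = tester.train(to_static=True)
    # the dy2st and dynamic-graph losses are expected to stay close
    print(np.allclose(dygraph_loss, static_loss, rtol=1e-3))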
def train(self, to_static=False):
    paddle.seed(SEED)
    mnist = MNIST()
    if to_static:
        print("Successfully applied @to_static.")
        mnist = paddle.jit.to_static(mnist)

    adam = AdamOptimizer(
        learning_rate=0.001, parameter_list=mnist.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    loss_data = []
    for epoch in range(self.epoch_num):
        start = time()
        for batch_id, data in enumerate(self.train_reader()):
            dy_x_data = np.array(
                [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
            y_data = np.array(
                [x[1] for x in data]).astype('int64').reshape(-1, 1)

            img = paddle.to_tensor(dy_x_data)
            label = paddle.to_tensor(y_data)
            label.stop_gradient = True

            with paddle.amp.auto_cast():
                prediction, acc, avg_loss = mnist(img, label=label)

            scaled = scaler.scale(avg_loss)
            scaled.backward()
            scaler.minimize(adam, scaled)

            loss_data.append(avg_loss.numpy()[0])
            # clear gradients before the next step
            mnist.clear_gradients()
            if batch_id % 10 == 0:
                print("Loss at epoch {} step {}: loss: {}, acc: {}, cost: {}"
                      .format(epoch, batch_id, avg_loss.numpy(), acc.numpy(),
                              time() - start))
                start = time()
            if batch_id == 50:
                break
    return loss_data
class MnistPredictionServiceSubclass(
        mnist_pb2_grpc.MnistPredictionServiceServicer):
    def __init__(self):
        self.mnist = MNIST()

    def MnistPredict(self, request, context):
        # decode the raw image bytes carried in the request
        inputs = cv2.imdecode(
            np.frombuffer(request.inputs, dtype='uint8'), 1)
        predict_result, predict_probability = self.mnist.interface(inputs)
        logging.info("predict_result {} predict_probability {}".format(
            predict_result, predict_probability))
        response = mnist_pb2.MnistPredictResponse()
        response.outputs = str(predict_result[0])
        response.probability = predict_probability
        return response
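A minimal bootstrap sketch for serving the servicer above. The port is an assumption, and the helper name add_MnistPredictionServiceServicer_to_server follows the usual protoc/grpc naming convention for a service called MnistPredictionService; check the generated mnist_pb2_grpc module for the exact name.

# Assumed server bootstrap; reuses mnist_pb2_grpc from the snippet above.
from concurrent import futures
import grpc


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    mnist_pb2_grpc.add_MnistPredictionServiceServicer_to_server(
        MnistPredictionServiceSubclass(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    serve()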
import web
import os, sys
import io
import base64
import numpy as np
import json
import cv2

sys.path.append("../train_test_mnist/")
from test_mnist import MNIST

mnist = MNIST()
urls = ('/predict', 'MNIST_SERVER')


class MNIST_SERVER():
    def GET(self):
        pass

    def POST(self):
        try:
            form = web.input()
        except ValueError as e:
            return e
        data = {"success": False}
        # the client sends the image as a URL-safe base64 string
        image_file_value = base64.urlsafe_b64decode(form['image'])
        image = cv2.imdecode(
            np.frombuffer(image_file_value, dtype='uint8'), 1)
        predict_result, predict_probability = mnist.interface(image)
        # assumed completion (not in the original snippet): return the
        # prediction to the client as JSON
        data["success"] = True
        data["prediction"] = str(predict_result[0])
        data["probability"] = float(predict_probability)
        return json.dumps(data)
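A minimal client sketch for the /predict endpoint above. The host, port, and image path are assumptions; the server side would also need the usual web.py bootstrap (web.application(urls, globals()).run(), default port 8080), which this snippet does not start.

# Assumed client; posts a URL-safe base64 image to the web.py server.
import base64
import requests

with open("digit.png", "rb") as f:
    payload = {"image": base64.urlsafe_b64encode(f.read())}

resp = requests.post("http://127.0.0.1:8080/predict", data=payload)
print(resp.text)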
def __init__(self):
    self.mnist = MNIST()