def test_train(self):
    """Smoke-test Trainer.train: build a fresh network, load the bundled
    training data, and run a short 5-epoch training pass.

    Passes if training completes without raising.
    """
    model = NumberRecognizeNN(Resource.INPUT_SIZE, Resource.OUTPUT_SIZE)
    r = Resource(self.TEST_DIR)
    trainer = Trainer(model, r)
    # NOTE: the original also built an unused DataProcessor(); removed as dead code.
    data, target = r.load_training_data()
    print("Test Train the model")
    trainer.train(data, target, epoch=5)
def test_model_api(self):
    """End-to-end check of ModelAPI: train on all but the last 200 samples,
    then score the API's predictions against the held-out tail.

    The score is printed, not asserted — this is a diagnostic, not a gate.
    """
    model = NumberRecognizeNN(Resource.INPUT_SIZE, Resource.OUTPUT_SIZE)
    r = Resource(self.TEST_DIR)
    trainer = Trainer(model, r)
    # NOTE: the original also built an unused DataProcessor(); removed as dead code.
    data, target = r.load_training_data()
    api_test_size = 200  # size of the held-out tail used as the API-level test set
    print("Train the model for API Test.")
    trainer.train(data[:-api_test_size], target[:-api_test_size], epoch=5)
    model_api = ModelAPI(r)
    predicted = model_api.predict(data[-api_test_size:])
    teacher = target[-api_test_size:]
    score = accuracy_score(teacher, predicted)
    print("Model API score is {}".format(score))
def post(self):
    """Serve a prediction for the posted "data[]" values.

    If no saved model directory exists yet, a model is trained from the
    bundled training data before predicting. The response maps "result"
    to the predicted label, or "-1" when no input values were supplied.
    """
    response = {"result": str(-1)}
    raw_values = self.get_arguments("data[]")
    resource = Resource()
    if not os.path.isdir(resource.model_path):
        # No saved model yet — train one from scratch before serving.
        from ml.model import NumberRecognizeNN
        from ml.trainer import Trainer
        net = NumberRecognizeNN(resource.INPUT_SIZE, resource.OUTPUT_SIZE)
        training_x, training_y = resource.load_training_data()
        Trainer(net, resource).train(training_x, training_y)
    api = ModelAPI(resource)
    if raw_values:
        features = [float(v) for v in raw_values]
        response["result"] = str(api.predict(features)[0])
    self.write(response)
def train(data_file, batch_size, epoch, test_size):
    """Train the number-recognition model, resuming from a saved model when one exists.

    Args:
        data_file: optional path to a training data file; falls back to the
            resource's default training data when falsy.
        batch_size: mini-batch size passed through to Trainer.train.
        epoch: number of training epochs passed through to Trainer.train.
        test_size: held-out fraction/size passed through to Trainer.train.
    """
    r = Resource()
    dp = DataProcessor()
    model = NumberRecognizeNN(Resource.INPUT_SIZE, Resource.OUTPUT_SIZE)
    try:
        # Resume: restore normalization params and previously saved weights.
        dp.means, dp.stds = r.load_normalization_params()
        r.load_model(model)
        print("load the model")
    except Exception as ex:
        # First run (or corrupt artifacts): start from a fresh model.
        # Report the reason instead of silently discarding the exception.
        print("trained model does not exist. ({})".format(ex))
    # Dead `x = None; y = None` pre-assignments removed — both branches assign.
    if data_file:
        x, y = r.load_data(data_file)
    else:
        x, y = r.load_training_data()
    trainer = Trainer(model, r)
    print("begin training")
    trainer.train(x, y, batch_size=batch_size, epoch=epoch, test_size=test_size)
def run(logdir, model_name, opt_name, loss_name):
    """Build (or reload), train, and save a model using the given optimizer and loss.

    Hyperparameters beyond the four arguments are read from the global FLAGS.
    """
    logger.info(f'model_name: {model_name}')
    logger.info(f'opt_name: {opt_name}')
    # Build a fresh network; input_shape (9, 9, 43) presumably encodes a
    # 9x9 shogi board with 43 feature planes — TODO confirm against SFEN loader.
    model = get_model(model_name)(
        input_shape=(9, 9, 43),
        classes=MOVE_DIRECTION_LABEL_NUM,
        activation_name=FLAGS.activation_name)
    logger.info(model.summary())
    # NOTE(review): the summary above is logged from the freshly built model even
    # when it is immediately replaced by a saved model on the next line.
    if FLAGS.saved_model_path:
        model = tf.keras.models.load_model(FLAGS.saved_model_path)
    optimizer = get_optimizer(opt_name)
    loss = get_loss(loss_name)
    trainer = Trainer(
        model, logdir, optimizer, loss,
        factor=FLAGS.reduce_factor,
        patience=FLAGS.reduce_patience,
        # NOTE(review): min_lr reuses FLAGS.reduce_factor — looks like a
        # copy-paste slip; a dedicated min-lr flag was likely intended. Confirm.
        min_lr=FLAGS.reduce_factor)
    # Record the full flag set alongside the TensorBoard logs for reproducibility.
    with trainer.summary_writer.as_default():
        tf.summary.text('parameters', FLAGS.flags_into_string(), step=0)
    dataset = SFEN(FLAGS.train_csv, FLAGS.valid_csv, FLAGS.epochs, FLAGS.batch_size)
    # 3143460
    # Default logging cadence: once per (approximate) epoch worth of steps.
    if FLAGS.logging_steps:
        logging_steps = FLAGS.logging_steps
    else:
        logging_steps = dataset.train_size // FLAGS.batch_size
    logger.info(f'logging_steps: {logging_steps}')
    trainer.train(dataset, logging_steps, FLAGS.learning_rate)
    trainer.save()
def post(self):
    """Serve a prediction for the posted "data[]" values, training a model
    first when no saved model directory exists. (Comments translated from
    Japanese.)
    """
    # Build the response map; "-1" signals that no prediction was made.
    resp = {"result": str(-1)}
    # Receive the submitted values from the request.
    data = self.get_arguments("data[]")
    r = Resource()
    # Model directory does NOT exist yet: train a model from scratch first.
    # (NOTE(review): the original Japanese comment said "if the path is an
    # existing directory", which contradicts the `not os.path.isdir` check.)
    if not os.path.isdir(r.model_path):
        # Import model.py
        from ml.model import NumberRecognizeNN
        # Import trainer.py
        from ml.trainer import Trainer
        # Instantiate the model class (Chainer)
        model = NumberRecognizeNN(r.INPUT_SIZE, r.OUTPUT_SIZE)
        # Instantiate the training class
        trainer = Trainer(model, r)
        # Load the training data
        x, y = r.load_training_data()
        # Run the training
        trainer.train(x, y)
    # Build the prediction API on top of the resource (and its saved model)
    api = ModelAPI(r)
    if len(data) > 0:
        _data = [float(d) for d in data]
        # Compute the prediction from the submitted data
        predicted = api.predict(_data)
        # Store the prediction in the response
        resp["result"] = str(predicted[0])
    # Send the response
    self.write(resp)