def run(predictor: Predictor, input_file: IO, output_file: Optional[IO], print_to_console: bool) -> None:
    """
    Read JSON-lines input from ``input_file``, run ``predictor.predict_json`` on
    each record, and emit one JSON result per line.

    Parameters
    ----------
    predictor : ``Predictor``
        The predictor whose ``predict_json`` is applied to each parsed line.
    input_file : ``IO``
        File-like object yielding one JSON object per line.
    output_file : ``Optional[IO]``
        If given, each serialized result is appended here with a trailing newline.
    print_to_console : ``bool``
        If ``True``, each serialized result is also printed to stdout.
    """
    for line in input_file:
        # Skip blank lines (e.g. a trailing newline in a JSONL file) instead of
        # crashing in json.loads — matches the behavior of the cuda_device overload.
        if line.isspace():
            continue
        data = json.loads(line)
        result = predictor.predict_json(data)
        output = json.dumps(result)
        if print_to_console:
            print(output)
        if output_file:
            output_file.write(output + "\n")
def run(predictor: Predictor, input_file: IO, output_file: Optional[IO], print_to_console: bool, cuda_device: int) -> None:
    """
    Stream JSON-lines records through ``predictor.predict_json`` on the given
    CUDA device, writing each serialized result to the console and/or a file.

    Parameters
    ----------
    predictor : ``Predictor``
        Predictor whose ``predict_json`` is called per record.
    input_file : ``IO``
        File-like source of one JSON object per line; blank lines are ignored.
    output_file : ``Optional[IO]``
        Destination for newline-terminated results, if provided.
    print_to_console : ``bool``
        Whether to also print each result to stdout.
    cuda_device : ``int``
        Device id forwarded to ``predict_json``.
    """
    for raw_line in input_file:
        # Guard clause: whitespace-only lines carry no JSON payload.
        if raw_line.isspace():
            continue
        parsed = json.loads(raw_line)
        prediction = predictor.predict_json(parsed, cuda_device)
        serialized = json.dumps(prediction)
        if print_to_console:
            print(serialized)
        if output_file:
            output_file.write(serialized + "\n")
def _caching_prediction(model: Predictor, data: str) -> JsonDict:
    """
    Thin wrapper over ``model.predict_json`` taking the input as a raw JSON
    string, so the (hashable) string argument can serve as a cache key for a
    caching decorator.
    """
    parsed_input = json.loads(data)
    return model.predict_json(parsed_input)