def testReadInstancesFromArgs_MoreThanOneType(self):
  with self.AssertRaisesExceptionMatches(
      predict_utilities.InvalidInstancesFileError,
      'Exactly one of --json-request, --json-instances and --text-instances '
      'must be specified.'):
    predict_utilities.ReadInstancesFromArgs(None, 'foo.json', 'bar.txt')
  with self.AssertRaisesExceptionMatches(
      predict_utilities.InvalidInstancesFileError,
      'Exactly one of --json-request, --json-instances and --text-instances '
      'must be specified.'):
    predict_utilities.ReadInstancesFromArgs('foo.json', None, 'bar.txt')
  with self.AssertRaisesExceptionMatches(
      predict_utilities.InvalidInstancesFileError,
      'Exactly one of --json-request, --json-instances and --text-instances '
      'must be specified.'):
    predict_utilities.ReadInstancesFromArgs('foo.json', 'bar.json', None)
  with self.AssertRaisesExceptionMatches(
      predict_utilities.InvalidInstancesFileError,
      'Exactly one of --json-request, --json-instances and --text-instances '
      'must be specified.'):
    predict_utilities.ReadInstancesFromArgs('foo.json', 'baz.json', 'bar.txt')

def _Run(args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.

  Returns:
    A json object that contains predictions.
  """
  instances = predict_utilities.ReadInstancesFromArgs(
      args.json_request,
      args.json_instances,
      args.text_instances,
      limit=INPUT_INSTANCES_LIMIT)

  with endpoint_util.MlEndpointOverrides(region=args.region):
    model_or_version_ref = predict_utilities.ParseModelOrVersionRef(
        args.model, args.version)
    if (args.signature_name is None and
        predict_utilities.CheckRuntimeVersion(args.model, args.version)):
      log.status.Print(
          'You are running on a runtime version >= 1.8. '
          'If the signature defined in the model is not serving_default, '
          'then you must specify it via the --signature-name flag; '
          'otherwise the command may fail.')
    results = predict.Predict(
        model_or_version_ref, instances, signature_name=args.signature_name)

  if not args.IsSpecified('format'):
    # The default format is based on the response.
    args.format = predict_utilities.GetDefaultFormat(
        results.get('predictions'))

  return results

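# A minimal sketch of exercising _Run directly, assuming an argparse-style
# namespace; the attribute names mirror the flags _Run reads, and the model
# name and instances file below are hypothetical. argparse.Namespace has no
# IsSpecified() method (the real gcloud namespace does), so it is stubbed
# out here purely for illustration.
import argparse

fake_args = argparse.Namespace(
    json_request=None,
    json_instances='instances.json',  # hypothetical instances file
    text_instances=None,
    region=None,
    model='my_model',  # hypothetical model name
    version=None,
    signature_name=None,
    format=None)
fake_args.IsSpecified = lambda flag: False
predictions = _Run(fake_args)
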
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.

  Returns:
    The prediction results, to be printed in the selected output format.
  """
  instances = predict_utilities.ReadInstancesFromArgs(
      args.json_instances, args.text_instances, limit=INPUT_INSTANCES_LIMIT)

  model_or_version_ref = predict_utilities.ParseModelOrVersionRef(
      args.model, args.version)
  results = predict.Predict(model_or_version_ref, instances)

  if not args.IsSpecified('format'):
    # The default format is based on the response.
    args.format = predict_utilities.GetDefaultFormat(
        results.get('predictions'))

  return results

def RunPredict(model_dir,
               json_instances=None,
               text_instances=None,
               framework='tensorflow',
               signature_name=None):
  """Run ML Engine local prediction."""
  instances = predict_utilities.ReadInstancesFromArgs(json_instances,
                                                      text_instances)
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    raise LocalPredictEnvironmentError(
        'You must be running an installed Cloud SDK to perform local '
        'prediction.')
  # Inheriting the environment preserves important variables in the child
  # process. In particular, LD_LIBRARY_PATH under linux and PATH under windows
  # could be used to point to non-standard install locations of CUDA and
  # CUDNN. If not inherited, the child process could fail to initialize
  # Tensorflow.
  env = os.environ.copy()
  env['CLOUDSDK_ROOT'] = sdk_root
  # We want to use whatever the user's Python was, before the Cloud SDK
  # started changing the PATH. That's where Tensorflow is installed.
  python_executables = files.SearchForExecutableOnPath('python')
  # Need to ensure that ml_sdk is in PYTHONPATH for the import in
  # local_predict to succeed.
  orig_py_path = ':' + env.get('PYTHONPATH') if env.get('PYTHONPATH') else ''
  env['PYTHONPATH'] = (
      os.path.join(sdk_root, 'lib', 'third_party', 'ml_sdk') + orig_py_path)
  if not python_executables:
    # This doesn't have to be actionable because things are probably beyond
    # help at this point.
    raise LocalPredictEnvironmentError(
        'Something has gone really wrong; we can\'t find a valid Python '
        'executable on your PATH.')
  # Use python found on PATH or the local_python override if set.
  python_executable = (properties.VALUES.ml_engine.local_python.Get() or
                       python_executables[0])
  predict_args = ['--model-dir', model_dir, '--framework', framework]
  if signature_name:
    predict_args += ['--signature-name', signature_name]
  # Start local prediction in a subprocess.
  proc = subprocess.Popen(
      [python_executable, local_predict.__file__] + predict_args,
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      env=env)

  # Pass the instances to the process that actually runs local prediction.
  for instance in instances:
    proc.stdin.write(json.dumps(instance) + '\n')
  proc.stdin.flush()

  # Get the results for the local prediction.
  output, err = proc.communicate()
  if proc.returncode != 0:
    raise LocalPredictRuntimeError(err)
  if err:
    log.warning(err)

  try:
    return json.loads(output)
  except ValueError:
    raise InvalidReturnValueError('The output for prediction is not '
                                  'in JSON format: ' + output)

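# A minimal usage sketch for the variant above, assuming an installed Cloud
# SDK and a Python on the PATH with TensorFlow available; the SavedModel
# directory, instances file, and signature name are hypothetical values.
predictions = RunPredict(
    '/tmp/my_model',  # hypothetical SavedModel directory
    json_instances='instances.json',
    framework='tensorflow',
    signature_name='serving_default')
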
def testReadInstancesFromArgs_Text(self):
  instances_file = self.Touch(self.temp_path, 'instances.txt',
                              contents=b'foo\nbar')
  self.assertEqual(
      predict_utilities.ReadInstancesFromArgs(None, None, instances_file),
      ['foo', 'bar'])

def testReadInstancesFromArgs_WithBOM(self):
  instances_file = self.Touch(self.temp_path, 'instances.json',
                              contents=b'\xef\xbb\xbf{"a": "b"}\n')
  self.assertEqual(
      predict_utilities.ReadInstancesFromArgs(None, instances_file, None),
      [{'a': 'b'}])

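# A sketch of the byte-order-mark tolerance the test above exercises,
# assuming instance data is decoded as UTF-8; the 'utf-8-sig' codec strips a
# leading BOM if present and is a no-op otherwise. The helper name is
# hypothetical, for illustration only.
def _DecodeInstanceLine(raw_bytes):
  """Decodes one line of instance data, tolerating a leading UTF-8 BOM."""
  return raw_bytes.decode('utf-8-sig')
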
def testReadInstancesFromArgs_JsonRequest(self):
  request_file = self.Touch(self.temp_path, 'request.json',
                            contents=b'{"instances": [{"a": "b"}]}\n')
  self.assertEqual(
      predict_utilities.ReadInstancesFromArgs(request_file, None, None),
      [{'a': 'b'}])

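# A sketch of the --json-request handling the test above exercises, assuming
# the request file holds a JSON object with an 'instances' key, mirroring the
# online predict request body; the helper name is hypothetical.
import json

def _ReadRequestInstances(path):
  """Reads a predict request file and returns its list of instances."""
  with open(path, 'r') as f:
    return json.load(f)['instances']
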
def RunPredict(model_dir, json_instances=None, text_instances=None):
  """Run ML Engine local prediction."""
  instances = predict_utilities.ReadInstancesFromArgs(json_instances,
                                                      text_instances)
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    raise LocalPredictEnvironmentError(
        'You must be running an installed Cloud SDK to perform local '
        'prediction.')
  # Inheriting the environment preserves important variables in the child
  # process. In particular, LD_LIBRARY_PATH under linux and PATH under windows
  # could be used to point to non-standard install locations of CUDA and
  # CUDNN. If not inherited, the child process could fail to initialize
  # Tensorflow.
  env = os.environ.copy()
  env['CLOUDSDK_ROOT'] = sdk_root
  # We want to use whatever the user's Python was, before the Cloud SDK
  # started changing the PATH. That's where Tensorflow is installed.
  python_executables = files.SearchForExecutableOnPath('python')
  if not python_executables:
    # This doesn't have to be actionable because things are probably beyond
    # help at this point.
    raise LocalPredictEnvironmentError(
        'Something has gone really wrong; we can\'t find a valid Python '
        'executable on your PATH.')
  python_executable = python_executables[0]
  # Start local prediction in a subprocess.
  proc = subprocess.Popen(
      [python_executable, local_predict.__file__, '--model-dir', model_dir],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      env=env)

  # Pass the instances to the process that actually runs local prediction.
  for instance in instances:
    proc.stdin.write(json.dumps(instance) + '\n')
  proc.stdin.flush()

  # Get the results for the local prediction.
  output, err = proc.communicate()
  if proc.returncode != 0:
    raise LocalPredictRuntimeError(err)
  if err:
    log.warning(err)

  try:
    return json.loads(output)
  except ValueError:
    raise InvalidReturnValueError('The output for prediction is not '
                                  'in JSON format: ' + output)

def _RunPredict(args):
  """Run ML Engine local prediction."""
  instances = predict_utilities.ReadInstancesFromArgs(
      args.json_instances, args.text_instances)
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    raise LocalPredictEnvironmentError(
        'You must be running an installed Cloud SDK to perform local '
        'prediction.')
  env = {'CLOUDSDK_ROOT': sdk_root}
  # We want to use whatever the user's Python was, before the Cloud SDK
  # started changing the PATH. That's where Tensorflow is installed.
  python_executables = files.SearchForExecutableOnPath('python')
  if not python_executables:
    # This doesn't have to be actionable because things are probably beyond
    # help at this point.
    raise LocalPredictEnvironmentError(
        'Something has gone really wrong; we can\'t find a valid Python '
        'executable on your PATH.')
  python_executable = python_executables[0]
  # Start local prediction in a subprocess.
  proc = subprocess.Popen(
      [python_executable, local_predict.__file__, '--model-dir',
       args.model_dir],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      env=env)

  # Pass the instances to the process that actually runs local prediction.
  for instance in instances:
    proc.stdin.write(json.dumps(instance) + '\n')
  proc.stdin.flush()

  # Get the results for the local prediction.
  output, err = proc.communicate()
  if proc.returncode != 0:
    raise LocalPredictRuntimeError(err)
  if err:
    log.warning(err)

  try:
    return json.loads(output)
  except ValueError:
    raise InvalidReturnValueError('The output for prediction is not '
                                  'in JSON format: ' + output)

def testReadInstancesFromArgs_TextStdin(self):
  self.WriteInput('foo\nbar')
  self.assertEqual(
      predict_utilities.ReadInstancesFromArgs(None, None, '-'),
      ['foo', 'bar'])

def testReadInstancesFromArgs_JsonStdin(self):
  self.WriteInput('{"a": "b"}')
  self.assertEqual(
      predict_utilities.ReadInstancesFromArgs(None, '-', None),
      [{'a': 'b'}])

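# A sketch of the '-' convention the two stdin tests above rely on, assuming
# the reader maps '-' to standard input in the usual CLI style; the helper
# name is hypothetical, for illustration only.
import sys

def _OpenInstancesSource(path):
  """Returns a readable stream, mapping the conventional '-' to stdin."""
  return sys.stdin if path == '-' else open(path, 'r')
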
def testReadInstancesFromArgs_NoInstances(self):
  with self.AssertRaisesExceptionMatches(
      predict_utilities.InvalidInstancesFileError,
      'Exactly one of --json-request, --json-instances and --text-instances '
      'must be specified.'):
    predict_utilities.ReadInstancesFromArgs(None, None, None)

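# A minimal sketch of the mutual-exclusion check these tests exercise,
# assuming ReadInstancesFromArgs simply counts its non-None sources; this
# illustrates the expected behavior rather than the actual gcloud code.
def _CheckExactlyOneSource(json_request, json_instances, text_instances):
  """Raises InvalidInstancesFileError unless exactly one source is given."""
  sources = (json_request, json_instances, text_instances)
  if sum(source is not None for source in sources) != 1:
    raise predict_utilities.InvalidInstancesFileError(
        'Exactly one of --json-request, --json-instances and '
        '--text-instances must be specified.')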