Example #1
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to
            this command invocation.

        Returns:
          Some value that we want to have printed later.
        """
        instances = predict_utilities.ReadInstancesFromArgs(
            args.json_instances,
            args.text_instances,
            limit=INPUT_INSTANCES_LIMIT)

        model_or_version_ref = predict_utilities.ParseModelOrVersionRef(
            args.model, args.version)

        results = predict.Predict(model_or_version_ref, instances)

        if not args.IsSpecified('format'):
            # Default format is based on the response.
            args.format = self._DefaultFormat(results.get('predictions'))

        return results
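
The _DefaultFormat helper is not shown above. As a hypothetical sketch of the idea — pick a table format when every prediction is a flat dict, and fall back to JSON otherwise (the real helper may well differ):

def _default_format(predictions):
    """Guess an output format string from the shape of the predictions."""
    if not predictions or not all(isinstance(p, dict) for p in predictions):
        return 'json'
    # Nested values don't render well as table cells, so keep JSON for those.
    if any(isinstance(v, (dict, list)) for p in predictions for v in p.values()):
        return 'json'
    keys = sorted(set().union(*(p.keys() for p in predictions)))
    return 'table({})'.format(','.join(keys))

# _default_format([{'label': 'cat', 'score': 0.9}])  -> 'table(label,score)'
# _default_format([[0.1, 0.9]])                      -> 'json'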
Example #2
    def Run(self, args):
        """This is what gets called when the user runs this command."""
        instances = predict_utilities.ReadInstancesFromArgs(
            args.json_instances, args.text_instances)

        # Start local prediction in a subprocess.
        proc = subprocess.Popen(
            ['python', local_predict.__file__, '--model-dir', args.model_dir],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        # Pass the instances to the process that actually runs local prediction.
        for instance in instances:
            proc.stdin.write(json.dumps(instance) + '\n')
        proc.stdin.flush()

        # Get the results of the local prediction.
        output, err = proc.communicate()
        if proc.returncode != 0:
            raise LocalPredictRuntimeError(err)
        if err:
            log.warn(err)

        try:
            return json.loads(output)
        except ValueError:
            raise InvalidReturnValueError('The output for prediction is not '
                                          'in JSON format: ' + output)
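
Hand-writing to proc.stdin and then calling communicate() works when the payload is small, but large writes can deadlock once the OS pipe buffer fills. A self-contained sketch of the same pattern that lets communicate() do the writing instead (worker.py is a hypothetical script that reads JSON lines on stdin and prints one JSON document):

import json
import subprocess
import sys

def run_worker(instances):
    """Stream JSON-encoded instances to a worker process and parse its output."""
    proc = subprocess.Popen(
        [sys.executable, 'worker.py'],  # hypothetical worker script
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True)  # text-mode pipes
    # communicate() writes the whole payload, closes stdin, and drains both
    # output pipes, avoiding the partial-write deadlock.
    payload = ''.join(json.dumps(instance) + '\n' for instance in instances)
    output, err = proc.communicate(payload)
    if proc.returncode != 0:
        raise RuntimeError(err)
    return json.loads(output)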
Example #3
def _RunPredict(version, args):
    """Run ML Engine local prediction."""
    instances = predict_utilities.ReadInstancesFromArgs(
        args.json_instances, args.text_instances)
    sdk_root = config.Paths().sdk_root
    if not sdk_root:
        raise LocalPredictEnvironmentError(
            'You must be running an installed Cloud SDK to perform local '
            'prediction.')
    env = {'CLOUDSDK_ROOT': sdk_root}
    # We want whatever the user's Python was before the Cloud SDK started
    # changing the PATH, since that's where TensorFlow is installed.
    python_executables = files.SearchForExecutableOnPath('python')
    if not python_executables:
        # This doesn't have to be actionable because things are probably beyond help
        # at this point.
        raise LocalPredictEnvironmentError(
            'Something has gone really wrong; we can\'t find a valid Python '
            'executable on your PATH.')
    python_executable = python_executables[0]
    # Start local prediction in a subprocess.
    proc = subprocess.Popen(
        [python_executable, local_predict.__file__,
         '--model-dir', args.model_dir, '--version', version],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env)

    # Pass the instances to the process that actually runs local prediction.
    for instance in instances:
        proc.stdin.write(json.dumps(instance) + '\n')
    proc.stdin.flush()

    # Get the results of the local prediction.
    output, err = proc.communicate()
    if proc.returncode != 0:
        raise LocalPredictRuntimeError(err)
    if err:
        log.warn(err)

    try:
        return json.loads(output)
    except ValueError:
        raise InvalidReturnValueError('The output for prediction is not '
                                      'in JSON format: ' + output)
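
files.SearchForExecutableOnPath is a Cloud SDK helper that returns every match on the PATH; outside the SDK, shutil.which (Python 3.3+) is a rough standard-library stand-in that returns only the first match, which is all this code uses:

import shutil

python_executable = shutil.which('python')
if python_executable is None:
    raise EnvironmentError('No python executable found on PATH.')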
Example #4
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    instances = predict_utilities.ReadInstancesFromArgs(
        args.json_instances, args.text_instances, limit=INPUT_INSTANCES_LIMIT)

    model_or_version_ref = predict_utilities.ParseModelOrVersionRef(
        args.model, args.version)

    results = predict.Predict(model_or_version_ref, instances)
    # Hack to make the results available to the Format() method.
    self.predictions = results.get('predictions')
    return results
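
The stash onto self only makes sense alongside a formatting hook that runs after Run(). A stripped-down, hypothetical sketch of the pattern (the Format() name and signature are assumed from the comment above, and the format strings are illustrative):

class PredictCommand(object):
    """Sketch only: Run() stashes data on self for a later Format() call."""

    def Run(self, args):
        results = {'predictions': [{'label': 'cat', 'score': 0.9}]}
        # Stash the predictions so Format() can inspect their shape later.
        self.predictions = results.get('predictions')
        return results

    def Format(self, args):
        # Flat dicts render well as a table; anything else falls back to JSON.
        if self.predictions and all(
                isinstance(p, dict) for p in self.predictions):
            return 'table(predictions)'
        return 'json'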