Code example #1
    def Run(self, args):
        """Run the configure-docker command."""
        if not file_utils.SearchForExecutableOnPath(
                'docker-credential-gcloud'):
            log.warning(
                '`docker-credential-gcloud` not in system PATH.\n'
                'gcloud\'s Docker credential helper can be configured but '
                'it will not work until this is corrected.')

        current_config = cred_utils.Configuration.ReadFromDisk()

        if file_utils.SearchForExecutableOnPath('docker'):
            if not current_config.SupportsRegistryHelpers():
                raise ConfigureDockerError(
                    'Invalid Docker version: The version of your Docker client is '
                    '[{}]; version [{}] or higher is required to support Docker '
                    'credential helpers.'.format(
                        current_config.DockerVersion(),
                        cred_utils.MIN_DOCKER_CONFIG_HELPER_VERSION))
        else:
            log.warning(
                '`docker` not in system PATH.\n'
                '`docker` and `docker-credential-gcloud` need to be in the same PATH '
                'in order to work correctly together.\n'
                'gcloud\'s Docker credential helper can be configured but '
                'it will not work until this is corrected.')

        current_helpers = current_config.GetRegisteredCredentialHelpers()
        new_helpers = cred_utils.GetGcloudCredentialHelperConfig()

        if new_helpers == current_helpers:
            log.status.Print('gcloud credential helpers '
                             'already registered correctly.')
            return

        if current_helpers:
            log.warning(
                'Your config file at [{0}] contains these credential helper '
                'entries:\n\n{1}\nThese will be overwritten.'.format(
                    current_config.path, json.dumps(current_helpers,
                                                    indent=2)))

        console_io.PromptContinue(
            message='The following settings will be added to your Docker '
            'config file located at [{0}]:\n {1}'.format(
                current_config.path, json.dumps(new_helpers, indent=2)),
            cancel_on_no=True)

        current_config.RegisterCredentialHelpers()
        log.status.Print('Docker configuration file updated.')
Code example #2
    def __init__(self):
        self.sdk_root = config.Paths().sdk_root
        self.release_channel = config.INSTALLATION_CONFIG.release_channel
        self.repo_url = config.INSTALLATION_CONFIG.snapshot_url
        repos = properties.VALUES.component_manager.additional_repositories.Get(
            validate=False)
        self.additional_repos = repos.split(',') if repos else []
        # Keep it as array for structured output.
        self.path = encoding.GetEncodedValue(os.environ, 'PATH',
                                             '').split(os.pathsep)
        self.python_path = [
            encoding.Decode(path_elem) for path_elem in sys.path
        ]

        if self.sdk_root:
            manager = update_manager.UpdateManager()
            self.components = manager.GetCurrentVersionsInformation()
            self.old_tool_paths = manager.FindAllOldToolsOnPath()
            self.duplicate_tool_paths = manager.FindAllDuplicateToolsOnPath()
            paths = [os.path.realpath(p) for p in self.path]
            this_path = os.path.realpath(
                os.path.join(self.sdk_root,
                             update_manager.UpdateManager.BIN_DIR_NAME))
            # TODO(b/36055867): Validate symlinks in /usr/local/bin when we start
            # creating them.
            self.on_path = this_path in paths
        else:
            self.components = {}
            self.old_tool_paths = []
            self.duplicate_tool_paths = []
            self.on_path = False

        self.kubectl = file_utils.SearchForExecutableOnPath('kubectl')
        if self.kubectl:
            self.kubectl = self.kubectl[0]
Code example #3
File: update_manager.py  Project: bopopescu/brydzenie
    def FindAllOldToolsOnPath(self, path=None):
        """Searches the PATH for any old Cloud SDK tools.

        Args:
          path: str, A path to use instead of the PATH environment variable.

        Returns:
          {str}, The old executable paths.
        """
        bin_dir = os.path.realpath(
            os.path.join(self.__sdk_root, UpdateManager.BIN_DIR_NAME))
        bad_commands = set()
        if not os.path.exists(bin_dir):
            return bad_commands

        commands = [
            f for f in os.listdir(bin_dir) if
            os.path.isfile(os.path.join(bin_dir, f)) and not f.startswith('.')
        ]

        for command in commands:
            existing_paths = file_utils.SearchForExecutableOnPath(command,
                                                                  path=path)
            if existing_paths:
                this_tool = os.path.join(bin_dir, command)
                bad_commands.update(
                    set(os.path.realpath(f)
                        for f in existing_paths) - set([this_tool]))
        return bad_commands
Code example #4
def RunPredict(model_dir, json_instances=None, text_instances=None,
               framework='tensorflow', signature_name=None):
  """Run ML Engine local prediction."""
  instances = predict_utilities.ReadInstancesFromArgs(json_instances,
                                                      text_instances)
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    raise LocalPredictEnvironmentError(
        'You must be running an installed Cloud SDK to perform local '
        'prediction.')
  # Inheriting the environment preserves important variables in the child
  # process. In particular, LD_LIBRARY_PATH under linux and PATH under windows
  # could be used to point to non-standard install locations of CUDA and CUDNN.
  # If not inherited, the child process could fail to initialize Tensorflow.
  env = os.environ.copy()
  env['CLOUDSDK_ROOT'] = sdk_root
  # We want to use whatever the user's Python was, before the Cloud SDK started
  # changing the PATH. That's where Tensorflow is installed.
  python_executables = files.SearchForExecutableOnPath('python')
  # Need to ensure that ml_sdk is in PYTHONPATH for the import in
  # local_predict to succeed.
  orig_py_path = ':' + env.get('PYTHONPATH') if env.get('PYTHONPATH') else ''
  env['PYTHONPATH'] = (os.path.join(sdk_root, 'lib', 'third_party', 'ml_sdk') +
                       orig_py_path)
  if not python_executables:
    # This doesn't have to be actionable because things are probably beyond help
    # at this point.
    raise LocalPredictEnvironmentError(
        'Something has gone really wrong; we can\'t find a valid Python '
        'executable on your PATH.')
  # Use python found on PATH or local_python override if set
  python_executable = (properties.VALUES.ml_engine.local_python.Get() or
                       python_executables[0])
  predict_args = ['--model-dir', model_dir, '--framework', framework]
  if signature_name:
    predict_args += ['--signature-name', signature_name]
  # Start local prediction in a subprocess.
  proc = subprocess.Popen(
      [python_executable, local_predict.__file__] + predict_args,
      stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      env=env)

  # Pass the instances to the process that actually runs local prediction.
  for instance in instances:
    proc.stdin.write(json.dumps(instance) + '\n')
  proc.stdin.flush()

  # Get the results for the local prediction.
  output, err = proc.communicate()
  if proc.returncode != 0:
    raise LocalPredictRuntimeError(err)
  if err:
    log.warning(err)

  try:
    return json.loads(output)
  except ValueError:
    raise InvalidReturnValueError('The output for prediction is not '
                                  'in JSON format: ' + output)
Code example #5
def RunPredict(model_dir, json_instances=None, text_instances=None):
  """Run ML Engine local prediction."""
  instances = predict_utilities.ReadInstancesFromArgs(json_instances,
                                                      text_instances)
  sdk_root = config.Paths().sdk_root
  if not sdk_root:
    raise LocalPredictEnvironmentError(
        'You must be running an installed Cloud SDK to perform local '
        'prediction.')
  # Inheriting the environment preserves important variables in the child
  # process. In particular, LD_LIBRARY_PATH under linux and PATH under windows
  # could be used to point to non-standard install locations of CUDA and CUDNN.
  # If not inherited, the child process could fail to initialize Tensorflow.
  env = os.environ.copy()
  env['CLOUDSDK_ROOT'] = sdk_root
  # We want to use whatever the user's Python was, before the Cloud SDK started
  # changing the PATH. That's where Tensorflow is installed.
  python_executables = files.SearchForExecutableOnPath('python')
  if not python_executables:
    # This doesn't have to be actionable because things are probably beyond help
    # at this point.
    raise LocalPredictEnvironmentError(
        'Something has gone really wrong; we can\'t find a valid Python '
        'executable on your PATH.')
  python_executable = python_executables[0]
  # Start local prediction in a subprocess.
  proc = subprocess.Popen(
      [python_executable, local_predict.__file__,
       '--model-dir', model_dir],
      stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      env=env)

  # Pass the instances to the process that actually runs local prediction.
  for instance in instances:
    proc.stdin.write(json.dumps(instance) + '\n')
  proc.stdin.flush()

  # Get the results for the local prediction.
  output, err = proc.communicate()
  if proc.returncode != 0:
    raise LocalPredictRuntimeError(err)
  if err:
    log.warning(err)

  try:
    return json.loads(output)
  except ValueError:
    raise InvalidReturnValueError('The output for prediction is not '
                                  'in JSON format: ' + output)
Code example #6
File: predict.py  Project: hemanthk92/CaseRoutingDemo
def _RunPredict(args):
    """Run ML Engine local prediction."""
    instances = predict_utilities.ReadInstancesFromArgs(
        args.json_instances, args.text_instances)
    sdk_root = config.Paths().sdk_root
    if not sdk_root:
        raise LocalPredictEnvironmentError(
            'You must be running an installed Cloud SDK to perform local '
            'prediction.')
    env = {'CLOUDSDK_ROOT': sdk_root}
    # We want to use whatever the user's Python was, before the Cloud SDK started
    # changing the PATH. That's where Tensorflow is installed.
    python_executables = files.SearchForExecutableOnPath('python')
    if not python_executables:
        # This doesn't have to be actionable because things are probably beyond help
        # at this point.
        raise LocalPredictEnvironmentError(
            'Something has gone really wrong; we can\'t find a valid Python '
            'executable on your PATH.')
    python_executable = python_executables[0]
    # Start local prediction in a subprocess.
    proc = subprocess.Popen([
        python_executable, local_predict.__file__, '--model-dir',
        args.model_dir
    ],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)

    # Pass the instances to the process that actually runs local prediction.
    for instance in instances:
        proc.stdin.write(json.dumps(instance) + '\n')
    proc.stdin.flush()

    # Get the results for the local prediction.
    output, err = proc.communicate()
    if proc.returncode != 0:
        raise LocalPredictRuntimeError(err)
    if err:
        log.warning(err)

    try:
        return json.loads(output)
    except ValueError:
        raise InvalidReturnValueError('The output for prediction is not '
                                      'in JSON format: ' + output)
Code example #7
    def __init__(self, anonymizer=None):
        anonymizer = anonymizer or NoopAnonymizer()
        self.sdk_root = anonymizer.ProcessPath(config.Paths().sdk_root)
        self.release_channel = config.INSTALLATION_CONFIG.release_channel
        self.repo_url = anonymizer.ProcessURL(
            config.INSTALLATION_CONFIG.snapshot_url)
        repos = properties.VALUES.component_manager.additional_repositories.Get(
            validate=False)
        self.additional_repos = (map(anonymizer.ProcessURL, repos.split(','))
                                 if repos else [])
        # Keep it as array for structured output.
        path = encoding.GetEncodedValue(os.environ, 'PATH',
                                        '').split(os.pathsep)
        self.python_path = [
            anonymizer.ProcessPath(encoding.Decode(path_elem))
            for path_elem in sys.path
        ]

        if self.sdk_root:
            manager = update_manager.UpdateManager()
            self.components = manager.GetCurrentVersionsInformation()
            self.other_tool_paths = [
                anonymizer.ProcessPath(p)
                for p in manager.FindAllOtherToolsOnPath()
            ]
            self.duplicate_tool_paths = [
                anonymizer.ProcessPath(p)
                for p in manager.FindAllDuplicateToolsOnPath()
            ]
            paths = [os.path.realpath(p) for p in path]
            this_path = os.path.realpath(
                os.path.join(self.sdk_root,
                             update_manager.UpdateManager.BIN_DIR_NAME))
            self.on_path = this_path in paths
        else:
            self.components = {}
            self.other_tool_paths = []
            self.duplicate_tool_paths = []
            self.on_path = False

        self.path = [anonymizer.ProcessPath(p) for p in path]
        self.kubectl = file_utils.SearchForExecutableOnPath('kubectl')
        if self.kubectl:
            self.kubectl = anonymizer.ProcessPath(self.kubectl[0])
Code example #8
def _VerifyLibIsInstalled(lib_name):
  """Checks whether a python library (module) needed for a test is installed.

  Args:
    lib_name: name of the library, e.g. `tensorflow`.

  Returns:
    A tuple for test skip decorators, consisting of two elements:
     - a boolean value to indicate whether the test should be skipped
     - a string with the reason for the skip, if any
  """

  python_executables = files.SearchForExecutableOnPath('python')
  if not python_executables:
    return False, 'No python executable available'
  python_executable = python_executables[0]
  command = [python_executable, '-c', 'import {}'.format(lib_name)]
  proc = subprocess.Popen(command,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout, stderr = proc.communicate()
  if proc.returncode:
    # Something went wrong during module import
    return (
        False,
        (
            'Could not verify {lib} install.\n'
            'Python location: {python}\n'
            'Command to test: {command}\n'
            '----------------stdout----------------\n'
            '{stdout}'
            '----------------stderr----------------'
            '{stderr}'.format(lib=lib_name,
                              python=python_executable,
                              command=command,
                              stdout=stdout,
                              stderr=stderr)
        )
    )

  return True, ''
Code example #9
    def DockerExists(self):
        return file_utils.SearchForExecutableOnPath(
            'docker') or file_utils.SearchForExecutableOnPath('docker.exe')
Code example #10
    def DockerCredentialGcloudExists(self):
        return file_utils.SearchForExecutableOnPath(
            'docker-credential-gcloud'
        ) or file_utils.SearchForExecutableOnPath(
            'docker-credential-gcloud.cmd')
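
Taken together, the examples treat SearchForExecutableOnPath as returning a list of matching executable paths (empty when nothing is found) and accepting an optional path argument; callers warn or raise when the list is empty and otherwise use the first match. The sketch below distills that recurring pattern. It assumes the import locations used in the Cloud SDK source (googlecloudsdk.core.log and googlecloudsdk.core.util.files), and require_tool is a hypothetical helper name introduced only for illustration.

from googlecloudsdk.core import log
from googlecloudsdk.core.util import files as file_utils


def require_tool(executable):
  """Return the first PATH match for `executable`, or None after a warning."""
  # The examples above rely on SearchForExecutableOnPath returning a list of matches.
  matches = file_utils.SearchForExecutableOnPath(executable)
  if not matches:
    log.warning('`{}` not in system PATH.'.format(executable))
    return None
  # Callers such as RunPredict use the first match (e.g. python_executables[0]).
  return matches[0]

For instance, require_tool('docker') mirrors the existence check in DockerExists (code example #9), and require_tool('kubectl') mirrors the kubectl lookup in code examples #2 and #7.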