def _GetStorageBucket(env_ref, release_track=base.ReleaseTrack.GA):
  env = environments_api_util.Get(env_ref, release_track=release_track)
  if not env.config.dagGcsPrefix:
    raise command_util.Error(BUCKET_MISSING_MSG)
  try:
    gcs_dag_dir = storage_util.ObjectReference.FromUrl(env.config.dagGcsPrefix)
  except (storage_util.InvalidObjectNameError, ValueError):
    raise command_util.Error(BUCKET_MISSING_MSG)
  return gcs_dag_dir.bucket_ref
Example #2
    def Run(self, args):
        running_state = (api_util.GetMessagesModule(
            release_track=self.ReleaseTrack()).Environment.
                         StateValueValuesEnum.RUNNING)

        env_ref = args.CONCEPTS.environment.Parse()
        env_obj = environments_api_util.Get(env_ref,
                                            release_track=self.ReleaseTrack())

        if env_obj.state != running_state:
            raise command_util.Error(
                'Cannot execute subcommand for environment in state {}. '
                'Must be RUNNING.'.format(env_obj.state))

        cluster_id = env_obj.config.gkeCluster
        cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)

        with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
            pod = command_util.GetGkePod(pod_substr=WORKER_POD_SUBSTR)

            self.BypassConfirmationPrompt(args)
            kubectl_args = [
                'exec', pod, '-tic', WORKER_CONTAINER, 'airflow',
                args.subcommand
            ]
            if args.cmd_args:
                # Add '--' to the argument list so kubectl won't eat the command args.
                kubectl_args.extend(['--'] + args.cmd_args)
            command_util.RunKubectlCommand(kubectl_args,
                                           out_func=log.status.Print)
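For a hypothetical invocation (values invented for illustration) with subcommand 'list_dags' and cmd_args ['-sd', '/home/airflow/gcs/dags'], the argument list handed to kubectl would be:

# ['exec', '<worker-pod>', '-tic', WORKER_CONTAINER, 'airflow', 'list_dags',
#  '--', '-sd', '/home/airflow/gcs/dags']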
Example #3
def _EnsureSubdirExists(bucket_ref, subdir):
    """Checks that a directory marker object exists in the bucket or creates one.

  The directory marker object is needed so that listing the subdirectory does
  not crash when the directory is empty.

  Args:
    bucket_ref: googlecloudsdk.api_lib.storage.storage_util.BucketReference,
        a reference to the environment's bucket
    subdir: str, the subdirectory to check or recreate. Should not contain
        slashes.
  """
    subdir_name = '{}/'.format(subdir)
    subdir_ref = storage_util.ObjectReference.FromBucketRef(
        bucket_ref, subdir_name)
    storage_client = storage_api.StorageClient()
    try:
        storage_client.GetObject(subdir_ref)
    except apitools_exceptions.HttpNotFoundError:
        # Insert an empty object into the bucket named subdir_name, which will
        # serve as an empty directory marker.
        insert_req = storage_client.messages.StorageObjectsInsertRequest(
            bucket=bucket_ref.bucket, name=subdir_name)
        upload = transfer.Upload.FromStream(io.BytesIO(),
                                            'application/octet-stream')
        try:
            storage_client.client.objects.Insert(insert_req, upload=upload)
        except apitools_exceptions.HttpError:
            raise command_util.Error(
                'Error re-creating empty {}/ directory. List calls may '
                'fail, but importing will restore the directory.'.format(subdir))
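A minimal usage sketch, assuming the usual Composer folder names (the helper and folder names below are not taken from this listing): re-create each directory marker before a listing call.

def _EnsureStandardSubdirsExist(bucket_ref):
    # Hypothetical helper: ensure marker objects exist for the standard folders
    # so that listing an empty environment bucket does not fail.
    for subdir in ('dags', 'plugins', 'data'):
        _EnsureSubdirExists(bucket_ref, subdir)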
    def CheckSubcommandNestedAirflowSupport(self, args, airflow_version):
        if (args.subcommand_nested
                and not image_versions_command_util.IsVersionInRange(
                    airflow_version, '1.10.14', None)):
            raise command_util.Error(
                'Nested subcommands are supported only for Composer environments '
                'with Airflow version 1.10.14 or higher.')
Example #5
    def Run(self, args):
        op_refs = args.CONCEPTS.operations.Parse()

        console_io.PromptContinue(message=command_util.ConstructList(
            'Deleting the following operations: ', [
                '[%s] in [%s]' % (op_ref.operationsId, op_ref.locationsId)
                for op_ref in op_refs
            ]),
                                  cancel_on_no=True,
                                  cancel_string='Deletion aborted by user.',
                                  throw_if_unattended=True)

        encountered_errors = False
        for op_ref in op_refs:
            try:
                operations_api_util.Delete(op_ref,
                                           release_track=self.ReleaseTrack())
                failed = None
            except apitools_exceptions.HttpError as e:
                exc = exceptions.HttpException(e)
                failed = exc.payload.status_message
                encountered_errors = True

            log.DeletedResource(op_ref.RelativeName(),
                                kind='operation',
                                failed=failed)

        if encountered_errors:
            raise command_util.Error('Some deletions did not succeed.')
Example #6
    def CheckSubcommandAirflowSupport(self, args, airflow_version):
        from_version, to_version = self.SUBCOMMAND_ALLOWLIST[args.subcommand]
        if not image_versions_command_util.IsVersionInRange(
                airflow_version, from_version, to_version):
            raise command_util.Error(
                'Subcommand {} is not supported for Composer environments with '
                'Airflow version {}.'.format(args.subcommand, airflow_version))
    def Run(self, args):
        self.DeprecationWarningPrompt(args)
        self.CheckForRequiredCmdArgs(args)

        running_state = (api_util.GetMessagesModule(
            release_track=self.ReleaseTrack()).Environment.
                         StateValueValuesEnum.RUNNING)

        env_ref = args.CONCEPTS.environment.Parse()
        env_obj = environments_api_util.Get(env_ref,
                                            release_track=self.ReleaseTrack())

        if env_obj.state != running_state:
            raise command_util.Error(
                'Cannot execute subcommand for environment in state {}. '
                'Must be RUNNING.'.format(env_obj.state))

        cluster_id = env_obj.config.gkeCluster
        cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)

        tty = 'no-tty' not in args

        with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
            try:
                image_version = env_obj.config.softwareConfig.imageVersion
                airflow_version = self._ExtractAirflowVersion(image_version)

                self.CheckSubcommandAirflowSupport(args, airflow_version)
                self.CheckSubcommandNestedAirflowSupport(args, airflow_version)

                kubectl_ns = command_util.FetchKubectlNamespace(image_version)
                pod = command_util.GetGkePod(pod_substr=WORKER_POD_SUBSTR,
                                             kubectl_namespace=kubectl_ns)

                log.status.Print(
                    'Executing within the following Kubernetes cluster namespace: '
                    '{}'.format(kubectl_ns))

                self.BypassConfirmationPrompt(args, airflow_version)
                kubectl_args = ['exec', pod, '--stdin']
                if tty:
                    kubectl_args.append('--tty')
                kubectl_args.extend([
                    '--container', WORKER_CONTAINER, '--', 'airflow',
                    args.subcommand
                ])
                if args.subcommand_nested:
                    kubectl_args.append(args.subcommand_nested)
                if args.cmd_args:
                    kubectl_args.extend(args.cmd_args)

                command_util.RunKubectlCommand(
                    command_util.AddKubectlNamespace(kubectl_ns, kubectl_args),
                    out_func=log.out.Print)
            except command_util.KubectlError as e:
                raise self.ConvertKubectlError(e, env_obj)
    def _SynchronousExecution(self, env_resource, operation):
        try:
            operations_api_util.WaitForOperation(
                operation,
                'Waiting for [{}] to be updated with [{}]'.format(
                    env_resource.RelativeName(), operation.name),
                release_track=self.ReleaseTrack())
        except command_util.Error as e:
            raise command_util.Error(
                'Error restarting web server [{}]: {}'.format(
                    env_resource.RelativeName(), six.text_type(e)))
Example #9
def _ImportStorageApi(gcs_bucket, source, destination):
    """Imports files and directories into a bucket."""
    client = storage_api.StorageClient()

    old_source = source
    source = source.rstrip('*')
    # If the source ends with an asterisk, the user indicates that the source
    # is a directory, so we shouldn't bother checking whether the source is an
    # object. This is important because certain subdirs always exist as empty
    # marker objects (e.g. dags/); without this check, import/export would just
    # try to copy that empty object.
    object_is_subdir = old_source != source
    if not object_is_subdir:
        # If source is not indicated to be a subdir, then strip the ending slash
        # so the specified directory is present in the destination.
        source = source.rstrip(posixpath.sep)

    source_is_local = not source.startswith('gs://')
    if source_is_local and not os.path.exists(source):
        raise command_util.Error('Source for import does not exist.')

    # Don't include the specified directory as we want that present in the
    # destination bucket.
    source_dirname = _JoinPaths(os.path.dirname(source),
                                '',
                                gsutil_path=not source_is_local)
    if source_is_local:
        if os.path.isdir(source):
            file_chooser = gcloudignore.GetFileChooserForDir(source)
            for rel_path in file_chooser.GetIncludedFiles(source):
                file_path = _JoinPaths(source, rel_path)
                if os.path.isdir(file_path):
                    continue
                dest_path = _GetDestPath(source_dirname, file_path,
                                         destination, False)
                obj_ref = storage_util.ObjectReference.FromBucketRef(
                    gcs_bucket, dest_path)
                client.CopyFileToGCS(file_path, obj_ref)
        else:  # Just upload the file.
            dest_path = _GetDestPath(source_dirname, source, destination,
                                     False)
            obj_ref = storage_util.ObjectReference.FromBucketRef(
                gcs_bucket, dest_path)
            client.CopyFileToGCS(source, obj_ref)
    else:
        source_ref = storage_util.ObjectReference.FromUrl(source)
        to_import = _GetObjectOrSubdirObjects(
            source_ref, object_is_subdir=object_is_subdir, client=client)
        for obj in to_import:
            dest_object = storage_util.ObjectReference.FromBucketRef(
                gcs_bucket,
                # Use obj.ToUrl() to ensure that the dirname is properly stripped.
                _GetDestPath(source_dirname, obj.ToUrl(), destination, False))
            client.Copy(obj, dest_object)
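A rough usage sketch (the local path is a placeholder and the helper name is invented): importing a local plugins directory into the environment bucket through this Storage API code path.

def _example_import(args, release_track=base.ReleaseTrack.GA):
    # Resolve the environment's bucket, then copy a local directory into the
    # bucket-relative 'plugins/' prefix.
    env_ref = args.CONCEPTS.environment.Parse()
    gcs_bucket = _GetStorageBucket(env_ref, release_track=release_track)
    _ImportStorageApi(gcs_bucket, '/home/user/my-plugins', 'plugins/')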
    def _SynchronousExecution(self, env_resource, operation):
        try:
            operations_api_util.WaitForOperation(
                operation,
                'Waiting for [{}] to be updated with [{}]'.format(
                    env_resource.RelativeName(), operation.name),
                release_track=self.ReleaseTrack())
        except command_util.Error as e:
            raise command_util.Error(
                'Failed to load the snapshot of the environment [{}]: {}'.format(
                    env_resource.RelativeName(), six.text_type(e)))
Example #11
    def ConvertKubectlError(self, error, env_obj):
        is_private = (
            env_obj.config.privateEnvironmentConfig and
            env_obj.config.privateEnvironmentConfig.enablePrivateEnvironment)
        if is_private:
            return command_util.Error(
                six.text_type(error) +
                ' Make sure you have followed https://cloud.google.com/composer/docs/how-to/accessing/airflow-cli#running_commands_on_a_private_ip_environment '
                'to enable access to your private Cloud Composer environment from '
                'your machine.')
        return error
def Patch(env_resource,
          field_mask,
          patch,
          is_async,
          release_track=base.ReleaseTrack.GA):
    """Patches an Environment, optionally waiting for the operation to complete.

  This function is intended to perform the common work of an Environment
  patching command's Run method. That is, calling the patch API method and
  waiting for the result or immediately returning the Operation.

  Args:
    env_resource: googlecloudsdk.core.resources.Resource, Resource representing
        the Environment to be patched
    field_mask: str, a field mask string containing comma-separated paths to be
        patched
    patch: Environment, a patch Environment containing updated values to apply
    is_async: bool, whether or not to perform the patch asynchronously
    release_track: base.ReleaseTrack, the release track of command. Will dictate
        which Composer client library will be used.

  Returns:
    an Operation corresponding to the Patch call if `is_async` is True;
    otherwise None is returned after the operation is complete

  Raises:
    command_util.Error: if `is_async` is False and the operation encounters
    an error
  """
    operation = environments_api_util.Patch(env_resource,
                                            patch,
                                            field_mask,
                                            release_track=release_track)
    details = 'with operation [{0}]'.format(operation.name)
    if is_async:
        log.UpdatedResource(env_resource.RelativeName(),
                            kind='environment',
                            is_async=True,
                            details=details)
        return operation

    try:
        operations_api_util.WaitForOperation(
            operation,
            'Waiting for [{}] to be updated with [{}]'.format(
                env_resource.RelativeName(), operation.name),
            release_track=release_track)
    except command_util.Error as e:
        raise command_util.Error('Error updating [{}]: {}'.format(
            env_resource.RelativeName(), six.text_type(e)))
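A minimal sketch of how a patch is typically driven end to end (the label value and the async_ attribute name are assumptions): build the field mask and patch body with ConstructPatch, then hand them to Patch.

    def _ExampleUpdateRun(self, args):
        env_ref = args.CONCEPTS.environment.Parse()
        field_mask, patch = ConstructPatch(
            env_ref=env_ref,
            update_labels={'team': 'data-eng'},  # hypothetical label update
            release_track=self.ReleaseTrack())
        # args.async_ is an assumed flag attribute.
        return Patch(env_ref, field_mask, patch, args.async_,
                     release_track=self.ReleaseTrack())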
    def CheckForRequiredCmdArgs(self, args):
        """Prevents running Airflow CLI commands without required arguments.

    Args:
      args: argparse.Namespace, An object that contains the values for the
        arguments specified in the .Args() method.
    """
        # Dict values are lists of tuples; each tuple represents a set of
        # arguments, at least one of which is required. E.g. for the
        # "users create" subcommand, one of "-p", "--password" or
        # "--use-random-password" is required.
        required_cmd_args = {
            ('users', 'create'):
            [['-p', '--password', '--use-random-password']],
        }

        def _StringifyRequiredCmdArgs(cmd_args):
            quoted_args = ['"{}"'.format(a) for a in cmd_args]
            return '[{}]'.format(', '.join(quoted_args))

        # Handle nested commands like "users create". There are two ways to execute
        # nested Airflow subcommands via gcloud:
        # 1. {command} myenv users create -- -u User
        # 2. {command} myenv users -- create -u User
        # TODO (b/185343261): avoid code duplication with BypassConfirmationPrompt.
        subcommand_two_level = None
        if args.subcommand_nested:
            subcommand_two_level = (args.subcommand, args.subcommand_nested)
        elif args.cmd_args:
            # The first element of args.cmd_args may not actually be a nested
            # subcommand, but that is fine and does not break the overall logic;
            # e.g. subcommand_two_level may end up as ('info', '--anonymize').
            subcommand_two_level = (args.subcommand, args.cmd_args[0])

        # For now `required_cmd_args` contains only two-level Airflow commands,
        # but potentially in the future it could be extended for one-level
        # commands as well, and this code will have to be updated appropriately.
        for subcommand_required_cmd_args in required_cmd_args.get(
                subcommand_two_level, []):
            if set(subcommand_required_cmd_args).isdisjoint(
                    set(args.cmd_args or [])):
                raise command_util.Error(
                    'The subcommand "{}" requires one of the following command line '
                    'arguments: {}.'.format(
                        ' '.join(subcommand_two_level),
                        _StringifyRequiredCmdArgs(
                            subcommand_required_cmd_args)))
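To illustrate the allowlist above (argparse.Namespace stands in for gcloud's parsed arguments; the values are invented), the first namespace below would trigger the error and the second would pass:

from argparse import Namespace

args_missing = Namespace(subcommand='users', subcommand_nested='create',
                         cmd_args=['-u', 'User'])
args_ok = Namespace(subcommand='users', subcommand_nested='create',
                    cmd_args=['-u', 'User', '--use-random-password'])
# self.CheckForRequiredCmdArgs(args_missing)  # raises command_util.Error
# self.CheckForRequiredCmdArgs(args_ok)       # passes the check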
def Export(env_ref, sources, destination, release_track=base.ReleaseTrack.GA):
  """Exports files and directories from an environment's Cloud Storage bucket.

  Args:
    env_ref: googlecloudsdk.core.resources.Resource, Resource representing
        the Environment whose bucket from which to export.
    sources: [str], a list of bucket-relative paths from which to export files.
        Directory sources are imported recursively; the directory itself will
        be present in the destination bucket. Can also include wildcards.
    destination: str, existing local directory or path to a Cloud Storage
        bucket or directory object to which to export.
        Must have a single trailing slash but no leading slash. For
        example, 'dir/foo/bar/'.
    release_track: base.ReleaseTrack, the release track of command. Will dictate
        which Composer client library will be used.

  Returns:
    None

  Raises:
    command_util.Error: if the storage bucket could not be retrieved or a
      non-Cloud Storage destination that is not a local directory was provided.
    command_util.GsutilError: the gsutil command failed
  """
  gcs_bucket = _GetStorageBucket(env_ref, release_track=release_track)
  source_refs = [
      storage_util.ObjectReference(gcs_bucket, source)
      for source in sources
  ]
  if destination.startswith('gs://'):
    destination = posixpath.join(destination.strip(posixpath.sep), '')
  elif not os.path.isdir(destination):
    raise command_util.Error('Destination for export must be a directory.')

  try:
    retval = storage_util.RunGsutilCommand(
        'cp',
        command_args=(['-r']
                      + [s.ToUrl() for s in source_refs]
                      + [destination]),
        run_concurrent=True,
        out_func=log.out.write,
        err_func=log.err.write)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise command_util.GsutilError(six.text_type(e))
  if retval:
    raise command_util.GsutilError('gsutil returned non-zero status code.')
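A minimal usage sketch (source and destination values are placeholders): exporting the environment's DAGs to an existing local directory.

def _ExampleExportRun(self, args):
  env_ref = args.CONCEPTS.environment.Parse()
  # 'dags/*' exports the folder contents; the local destination directory must
  # already exist.
  Export(env_ref, ['dags/*'], 'exported-dags/',
         release_track=self.ReleaseTrack())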
Example #15
  def Run(self, args):
    self.DeprecationWarningPrompt(args)

    running_state = (
        api_util.GetMessagesModule(release_track=self.ReleaseTrack())
        .Environment.StateValueValuesEnum.RUNNING)

    env_ref = args.CONCEPTS.environment.Parse()
    env_obj = environments_api_util.Get(
        env_ref, release_track=self.ReleaseTrack())

    if env_obj.state != running_state:
      raise command_util.Error(
          'Cannot execute subcommand for environment in state {}. '
          'Must be RUNNING.'.format(env_obj.state))

    cluster_id = env_obj.config.gkeCluster
    cluster_location_id = command_util.ExtractGkeClusterLocationId(env_obj)

    with command_util.TemporaryKubeconfig(cluster_location_id, cluster_id):
      try:
        kubectl_ns = command_util.FetchKubectlNamespace(
            env_obj.config.softwareConfig.imageVersion)
        pod = command_util.GetGkePod(
            pod_substr=WORKER_POD_SUBSTR, kubectl_namespace=kubectl_ns)

        log.status.Print(
            'Executing within the following kubectl namespace: {}'.format(
                kubectl_ns))

        self.BypassConfirmationPrompt(args)
        kubectl_args = [
            'exec', pod, '-tic', WORKER_CONTAINER, 'airflow', args.subcommand
        ]
        if args.cmd_args:
          # Add '--' to the argument list so kubectl won't eat the command args.
          kubectl_args.extend(['--'] + args.cmd_args)

        command_util.RunKubectlCommand(
            command_util.AddKubectlNamespace(kubectl_ns, kubectl_args),
            out_func=log.status.Print)
      except command_util.KubectlError as e:
        raise self.ConvertKubectlError(e, env_obj)
Example #16
def _ExportStorageApi(gcs_bucket, source, destination):
    """Exports files and directories from an environment's GCS bucket."""
    old_source = source
    source = source.rstrip('*')
    # If the source ends with an asterisk, the user indicates that the source
    # is a directory, so we shouldn't bother checking whether the source is an
    # object. This is important because certain subdirs always exist as empty
    # marker objects (e.g. dags/); without this check, import/export would just
    # try to copy that empty object.
    object_is_subdir = old_source != source

    client = storage_api.StorageClient()
    source_ref = storage_util.ObjectReference(gcs_bucket, source)
    dest_is_local = True
    if destination.startswith('gs://'):
        destination = _JoinPaths(destination.strip(posixpath.sep),
                                 '',
                                 gsutil_path=True)
        dest_is_local = False
    elif not os.path.isdir(destination):
        raise command_util.Error('Destination for export must be a directory.')

    source_dirname = _JoinPaths(os.path.dirname(source), '', gsutil_path=True)
    to_export = _GetObjectOrSubdirObjects(source_ref,
                                          object_is_subdir=object_is_subdir,
                                          client=client)
    if dest_is_local:
        for obj in to_export:
            dest_path = _GetDestPath(source_dirname, obj.name, destination,
                                     True)
            files.MakeDir(os.path.dirname(dest_path))
            # Command description for export commands says overwriting is default
            # behavior.
            client.CopyFileFromGCS(obj.bucket_ref,
                                   obj.name,
                                   dest_path,
                                   overwrite=True)
    else:
        for obj in to_export:
            dest_object = storage_util.ObjectReference.FromUrl(
                _GetDestPath(source_dirname, obj.name, destination, False))
            client.Copy(obj, dest_object)
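A rough usage sketch (paths are placeholders, the wrapper is invented): exporting everything under dags/ to a local directory that already exists, via this Storage API code path.

def _example_export(env_ref):
    # env_ref is assumed to be an already-parsed environment resource.
    gcs_bucket = _GetStorageBucket(env_ref)
    _ExportStorageApi(gcs_bucket, 'dags/*', '/tmp/exported-dags')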
def _ExportGsutil(gcs_bucket, source, destination):
  """Exports files and directories from an environment's GCS bucket."""
  source_ref = storage_util.ObjectReference.FromBucketRef(gcs_bucket, source)
  if destination.startswith('gs://'):
    destination = _JoinPaths(
        destination.strip(posixpath.sep), '', gsutil_path=True)
  elif not os.path.isdir(destination):
    raise command_util.Error('Destination for export must be a directory.')

  try:
    retval = storage_util.RunGsutilCommand(
        'cp',
        command_args=['-r', source_ref.ToUrl(), destination],
        run_concurrent=True,
        out_func=log.out.write,
        err_func=log.err.write)
  except (execution_utils.PermissionError,
          execution_utils.InvalidCommandError) as e:
    raise command_util.GsutilError(six.text_type(e))
  if retval:
    raise command_util.GsutilError('gsutil returned non-zero status code.')
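The gsutil-based variant takes the same (bucket, source, destination) triple; a hypothetical call mirroring the sketch above, with the same caveat that a local destination must already exist:

_ExportGsutil(gcs_bucket, 'dags', '/tmp/exported-dags')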
def ConstructPatch(env_ref=None,
                   node_count=None,
                   update_pypi_packages_from_file=None,
                   clear_pypi_packages=None,
                   remove_pypi_packages=None,
                   update_pypi_packages=None,
                   clear_labels=None,
                   remove_labels=None,
                   update_labels=None,
                   clear_airflow_configs=None,
                   remove_airflow_configs=None,
                   update_airflow_configs=None,
                   clear_env_variables=None,
                   remove_env_variables=None,
                   update_env_variables=None,
                   update_image_version=None,
                   update_web_server_access_control=None,
                   cloud_sql_machine_type=None,
                   web_server_machine_type=None,
                   scheduler_cpu=None,
                   worker_cpu=None,
                   scheduler_memory_gb=None,
                   worker_memory_gb=None,
                   min_workers=None,
                   max_workers=None,
                   maintenance_window_start=None,
                   maintenance_window_end=None,
                   maintenance_window_recurrence=None,
                   release_track=base.ReleaseTrack.GA):
    """Constructs an environment patch.

  Args:
    env_ref: resource argument, Environment resource argument for environment
      being updated.
    node_count: int, the desired node count
    update_pypi_packages_from_file: str, path to local requirements file
      containing desired pypi dependencies.
    clear_pypi_packages: bool, whether to uninstall all PyPI packages.
    remove_pypi_packages: iterable(string), Iterable of PyPI packages to
      uninstall.
    update_pypi_packages: {string: string}, dict mapping PyPI package name to
      extras and version specifier.
    clear_labels: bool, whether to clear the labels dictionary.
    remove_labels: iterable(string), Iterable of label names to remove.
    update_labels: {string: string}, dict of label names and values to set.
    clear_airflow_configs: bool, whether to clear the Airflow configs
      dictionary.
    remove_airflow_configs: iterable(string), Iterable of Airflow config
      property names to remove.
    update_airflow_configs: {string: string}, dict of Airflow config property
      names and values to set.
    clear_env_variables: bool, whether to clear the environment variables
      dictionary.
    remove_env_variables: iterable(string), Iterable of environment variables
      to remove.
    update_env_variables: {string: string}, dict of environment variable
      names and values to set.
    update_image_version: string, image version to use for environment upgrade
    update_web_server_access_control: [{string: string}], Webserver access
        control to set
    cloud_sql_machine_type: str or None, Cloud SQL machine type used by the
        Airflow database.
    web_server_machine_type: str or None, machine type used by the Airflow web
        server
    scheduler_cpu: float or None, CPU allocated to Airflow scheduler.
        Can be specified only in Composer 2.0.0.
    worker_cpu: float or None, CPU allocated to each Airflow worker.
        Can be specified only in Composer 2.0.0.
    scheduler_memory_gb: float or None, memory allocated to Airflow scheduler.
      Can be specified only in Composer 2.0.0.
    worker_memory_gb: float or None, memory allocated to each Airflow worker.
      Can be specified only in Composer 2.0.0.
    min_workers: int or None, minimum number of workers
        in the Environment. Can be specified only in Composer 2.0.0.
    max_workers: int or None, maximum number of workers
        in the Environment. Can be specified only in Composer 2.0.0.
    maintenance_window_start: Datetime or None, a starting date of the
        maintenance window.
    maintenance_window_end: Datetime or None, an ending date of the maintenance
        window.
    maintenance_window_recurrence: str or None, recurrence RRULE for the
        maintenance window.
    release_track: base.ReleaseTrack, the release track of command. Will dictate
        which Composer client library will be used.

  Returns:
    (str, Environment), the field mask and environment to use for update.

  Raises:
    command_util.Error: if no update type is specified
  """
    if node_count:
        return _ConstructNodeCountPatch(node_count,
                                        release_track=release_track)
    if update_pypi_packages_from_file:
        return _ConstructPyPiPackagesPatch(
            True, [],
            command_util.ParseRequirementsFile(update_pypi_packages_from_file),
            release_track=release_track)
    if clear_pypi_packages or remove_pypi_packages or update_pypi_packages:
        return _ConstructPyPiPackagesPatch(clear_pypi_packages,
                                           remove_pypi_packages,
                                           update_pypi_packages,
                                           release_track=release_track)
    if clear_labels or remove_labels or update_labels:
        return _ConstructLabelsPatch(clear_labels,
                                     remove_labels,
                                     update_labels,
                                     release_track=release_track)
    if (clear_airflow_configs or remove_airflow_configs
            or update_airflow_configs):
        return _ConstructAirflowConfigsPatch(clear_airflow_configs,
                                             remove_airflow_configs,
                                             update_airflow_configs,
                                             release_track=release_track)
    if clear_env_variables or remove_env_variables or update_env_variables:
        return _ConstructEnvVariablesPatch(env_ref,
                                           clear_env_variables,
                                           remove_env_variables,
                                           update_env_variables,
                                           release_track=release_track)
    if update_image_version:
        return _ConstructImageVersionPatch(update_image_version,
                                           release_track=release_track)
    if update_web_server_access_control is not None:
        return _ConstructWebServerAccessControlPatch(
            update_web_server_access_control, release_track=release_track)
    if cloud_sql_machine_type:
        return _ConstructCloudSqlMachineTypePatch(cloud_sql_machine_type,
                                                  release_track=release_track)
    if web_server_machine_type:
        return _ConstructWebServerMachineTypePatch(web_server_machine_type,
                                                   release_track=release_track)
    if (scheduler_cpu or worker_cpu or scheduler_memory_gb or worker_memory_gb
            or min_workers or max_workers):
        return _ConstructAutoscalingPatch(
            scheduler_cpu=scheduler_cpu,
            worker_cpu=worker_cpu,
            scheduler_memory_gb=scheduler_memory_gb,
            worker_memory_gb=worker_memory_gb,
            worker_min_count=min_workers,
            worker_max_count=max_workers,
            release_track=release_track)
    if maintenance_window_start and maintenance_window_end and maintenance_window_recurrence:
        return _ConstructMaintenanceWindowPatch(maintenance_window_start,
                                                maintenance_window_end,
                                                maintenance_window_recurrence,
                                                release_track=release_track)
    raise command_util.Error(
        'Cannot update Environment with no update type specified.')
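For example, the maintenance window branch above only fires when all three window arguments are supplied together; a hypothetical call (values invented, assuming the datetime module):

import datetime

field_mask, patch = ConstructPatch(
    maintenance_window_start=datetime.datetime(2021, 1, 1, 1, 0),
    maintenance_window_end=datetime.datetime(2021, 1, 1, 7, 0),
    maintenance_window_recurrence='FREQ=WEEKLY;BYDAY=SA,SU',
    release_track=base.ReleaseTrack.GA)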
Example #19
def ConstructPatch(env_ref=None,
                   node_count=None,
                   update_pypi_packages_from_file=None,
                   clear_pypi_packages=None,
                   remove_pypi_packages=None,
                   update_pypi_packages=None,
                   clear_labels=None,
                   remove_labels=None,
                   update_labels=None,
                   clear_airflow_configs=None,
                   remove_airflow_configs=None,
                   update_airflow_configs=None,
                   clear_env_variables=None,
                   remove_env_variables=None,
                   update_env_variables=None,
                   update_image_version=None,
                   release_track=base.ReleaseTrack.GA):
  """Constructs an environment patch.

  Args:
    env_ref: resource argument, Environment resource argument for environment
      being updated.
    node_count: int, the desired node count
    update_pypi_packages_from_file: str, path to local requirements file
      containing desired pypi dependencies.
    clear_pypi_packages: bool, whether to uninstall all PyPI packages.
    remove_pypi_packages: iterable(string), Iterable of PyPI packages to
      uninstall.
    update_pypi_packages: {string: string}, dict mapping PyPI package name to
      extras and version specifier.
    clear_labels: bool, whether to clear the labels dictionary.
    remove_labels: iterable(string), Iterable of label names to remove.
    update_labels: {string: string}, dict of label names and values to set.
    clear_airflow_configs: bool, whether to clear the Airflow configs
      dictionary.
    remove_airflow_configs: iterable(string), Iterable of Airflow config
      property names to remove.
    update_airflow_configs: {string: string}, dict of Airflow config property
      names and values to set.
    clear_env_variables: bool, whether to clear the environment variables
      dictionary.
    remove_env_variables: iterable(string), Iterable of environment variables
      to remove.
    update_env_variables: {string: string}, dict of environment variable
      names and values to set.
    update_image_version: string, image version to use for environment upgrade
    release_track: base.ReleaseTrack, the release track of command. Will dictate
        which Composer client library will be used.

  Returns:
    (str, Environment), the field mask and environment to use for update.

  Raises:
    command_util.Error: if no update type is specified
  """
  if node_count:
    return _ConstructNodeCountPatch(node_count, release_track=release_track)
  if update_pypi_packages_from_file:
    return _ConstructPyPiPackagesPatch(
        True, [],
        command_util.ParseRequirementsFile(update_pypi_packages_from_file),
        release_track=release_track)
  if clear_pypi_packages or remove_pypi_packages or update_pypi_packages:
    return _ConstructPyPiPackagesPatch(
        clear_pypi_packages,
        remove_pypi_packages,
        update_pypi_packages,
        release_track=release_track)
  if clear_labels or remove_labels or update_labels:
    return _ConstructLabelsPatch(
        clear_labels, remove_labels, update_labels, release_track=release_track)
  if (clear_airflow_configs or remove_airflow_configs or
      update_airflow_configs):
    return _ConstructAirflowConfigsPatch(
        clear_airflow_configs,
        remove_airflow_configs,
        update_airflow_configs,
        release_track=release_track)
  if clear_env_variables or remove_env_variables or update_env_variables:
    return _ConstructEnvVariablesPatch(
        env_ref,
        clear_env_variables,
        remove_env_variables,
        update_env_variables,
        release_track=release_track)
  if update_image_version:
    return _ConstructImageVersionPatch(
        update_image_version, release_track=release_track)
  raise command_util.Error(
      'Cannot update Environment with no update type specified.')
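A hypothetical call against this older, narrower signature, pinning a single PyPI package (package and version are invented):

field_mask, patch = ConstructPatch(
    update_pypi_packages={'scipy': '==1.4.1'},
    release_track=base.ReleaseTrack.GA)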