Code example #1
def GetConfigurationChanges(args):
    """Returns a list of changes to Configuration, based on the flags set."""
    changes = []
    if _HasEnvChanges(args):
        changes.append(_GetEnvChanges(args))

    if 'memory' in args and args.memory:
        changes.append(config_changes.ResourceChanges(memory=args.memory))
    if 'concurrency' in args and args.concurrency:
        try:
            c = int(args.concurrency)
        except ValueError:
            c = args.concurrency
            if c != 'default':
                log.warning(
                    'Specifying concurrency as Single or Multi is deprecated; '
                    'an integer is preferred.')
        changes.append(config_changes.ConcurrencyChanges(concurrency=c))
    if 'timeout' in args and args.timeout:
        try:
            # A bare number is interpreted as seconds.
            timeout_secs = int(args.timeout)
        except ValueError:
            timeout_duration = times.ParseDuration(args.timeout)
            timeout_secs = int(timeout_duration.total_seconds)
        if timeout_secs <= 0:
            raise ArgumentError(
                'The --timeout argument must be a positive time duration.')
        changes.append(config_changes.TimeoutChanges(timeout=timeout_secs))
    return changes
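
The try/int/except pattern above recurs throughout these examples: a bare number is read as seconds, and anything else is handed to times.ParseDuration. Below is a standalone sketch of the same idiom using only the standard library; the regex fallback is an assumption standing in for gcloud's ParseDuration, not its actual grammar:

import re

def parse_timeout_secs(value):
    """Bare numbers are seconds; otherwise accept 'XhYmZs'-style strings."""
    try:
        return int(value)  # A bare number is interpreted as seconds.
    except ValueError:
        pass
    total, pos = 0.0, 0
    for m in re.finditer(r'(\d+(?:\.\d+)?)([hms])', value):
        if m.start() != pos:
            raise ValueError('invalid duration: {!r}'.format(value))
        total += float(m.group(1)) * {'h': 3600, 'm': 60, 's': 1}[m.group(2)]
        pos = m.end()
    if pos == 0 or pos != len(value):
        raise ValueError('invalid duration: {!r}'.format(value))
    return int(total)

# parse_timeout_secs('90')    -> 90
# parse_timeout_secs('2m30s') -> 150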
Code example #2
def Display(self, value):
    """Returns the display string for a duration value, leading PT dropped."""
    d = times.ParseDuration('{}s'.format(value))
    s = times.FormatDuration(d)
    if s.startswith('PT'):
        s = s[2:].lower()
    return s
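
A plausible round trip for Display, assuming times.FormatDuration emits ISO 8601 duration strings such as 'PT1M30S':

# Display(90)   -> ParseDuration('90s')   -> 'PT1M30S' -> '1m30s'
# Display(3600) -> ParseDuration('3600s') -> 'PT1H'    -> '1h'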
Code example #3
def GetServiceTimeoutSeconds(timeout_property_str):
    """Returns the service timeout in seconds given the duration string."""
    if timeout_property_str is None:
        return None
    build_timeout_duration = times.ParseDuration(timeout_property_str,
                                                 default_suffix='s')
    return int(build_timeout_duration.total_seconds)
Code example #4
  def _PossiblyBuildAndPush(
      self, new_version, service, upload_dir, source_files, image,
      code_bucket_ref, gcr_domain, flex_image_build_option):
    """Builds and Pushes the Docker image if necessary for this service.

    Args:
      new_version: version_util.Version describing where to deploy the service
      service: yaml_parsing.ServiceYamlInfo, service configuration to be
        deployed
      upload_dir: str, path to the service's upload directory
      source_files: [str], relative paths to upload.
      image: str or None, the URL for the Docker image to be deployed (if image
        already exists).
      code_bucket_ref: cloud_storage.BucketReference where the service's files
        have been uploaded
      gcr_domain: str, Cloud Registry domain, determines the physical location
        of the image. E.g. `us.gcr.io`.
      flex_image_build_option: FlexImageBuildOptions, whether a flex deployment
        should upload files so that the server can build the image or build the
        image on client.
    Returns:
      BuildArtifact, a wrapper which contains either the build ID for
        an in-progress build, or the name of the container image for a serial
        build. Possibly None if the service does not require an image.
    Raises:
      RequiredFileMissingError: if a required file is not uploaded.
    """
    build = None
    if image:
      if service.RequiresImage() and service.parsed.skip_files.regex:
        log.warning('Deployment of service [{0}] will ignore the skip_files '
                    'field in the configuration file, because the image has '
                    'already been built.'.format(new_version.service))
      return app_cloud_build.BuildArtifact.MakeImageArtifact(image)
    elif service.RequiresImage():
      if not _AppYamlInSourceFiles(source_files, service.GetAppYamlBasename()):
        raise RequiredFileMissingError(service.GetAppYamlBasename())

      if flex_image_build_option == FlexImageBuildOptions.ON_SERVER:
        cloud_build_options = {
            'appYamlPath': service.GetAppYamlBasename(),
        }
        timeout = properties.VALUES.app.cloud_build_timeout.Get()
        if timeout:
          build_timeout = int(
              times.ParseDuration(timeout, default_suffix='s').total_seconds)
          cloud_build_options['cloudBuildTimeout'] = six.text_type(
              build_timeout) + 's'
        build = app_cloud_build.BuildArtifact.MakeBuildOptionsArtifact(
            cloud_build_options)
      else:
        build = deploy_command_util.BuildAndPushDockerImage(
            new_version.project, service, upload_dir, source_files,
            new_version.id, code_bucket_ref, gcr_domain,
            self.deploy_options.runtime_builder_strategy,
            self.deploy_options.parallel_build)

    return build
Code example #5
def ParseExpireTime(expiration_value):
  """Parse flag value into Datetime format for expireTime."""
  # expiration_value could be in Datetime format or Duration format.
  datetime = (
      times.ParseDuration(expiration_value).GetRelativeDateTime(
          times.Now(times.UTC)))
  parsed_datetime = times.FormatDateTime(
      datetime, '%Y-%m-%dT%H:%M:%S.%6f%Ez', tzinfo=times.UTC)
  return parsed_datetime
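
A standard-library equivalent of the same computation; the 'Z'-suffixed RFC 3339 output here is an assumption mirroring the '%Y-%m-%dT%H:%M:%S.%6f%Ez' pattern above:

from datetime import datetime, timedelta, timezone

def parse_expire_time(duration_secs):
    # Add the duration to now (UTC) and format as RFC 3339 with microseconds.
    expire = datetime.now(timezone.utc) + timedelta(seconds=duration_secs)
    return expire.strftime('%Y-%m-%dT%H:%M:%S.%f') + 'Z'

# parse_expire_time(3600) -> e.g. '2021-01-01T13:00:00.000000Z'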
Code example #6
def Parse(value):
  """Parses a duration from value and returns integer seconds."""
  try:
    return int(
        times.ParseDuration(value, default_suffix=default_unit).total_seconds)
  except times.Error as e:
    message = six.text_type(e).rstrip('.')
    raise ArgumentTypeError(_GenerateErrorMessage(
        'Failed to parse duration: {0}'.format(message), user_input=value))
Code example #7
def GetServiceTimeoutString(timeout_property_str):
    if timeout_property_str is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(timeout_property_str)
        except ValueError:
            build_timeout_duration = times.ParseDuration(timeout_property_str)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        return str(build_timeout_secs) + 's'
    return None
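
Expected normalization into the API's seconds-with-suffix form, assuming gcloud's ParseDuration semantics:

# GetServiceTimeoutString('120') -> '120s'
# GetServiceTimeoutString('2m')  -> '120s'
# GetServiceTimeoutString(None)  -> None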
Code example #8
def GetConfigurationChanges(args):
    """Returns a list of changes to Configuration, based on the flags set."""
    changes = []
    if _HasEnvChanges(args):
        changes.append(_GetEnvChanges(args))

    if _HasCloudSQLChanges(args):
        region = GetRegion(args)
        project = (getattr(args, 'project', None)
                   or properties.VALUES.core.project.Get(required=True))
        _CheckCloudSQLApiEnablement()
        changes.append(config_changes.CloudSQLChanges(project, region, args))

    if 'cpu' in args and args.cpu:
        changes.append(config_changes.ResourceChanges(cpu=args.cpu))
    if 'memory' in args and args.memory:
        changes.append(config_changes.ResourceChanges(memory=args.memory))
    if 'concurrency' in args and args.concurrency:
        try:
            c = int(args.concurrency)
        except ValueError:
            c = args.concurrency
            if c != 'default':
                log.warning(
                    'Specifying concurrency as Single or Multi is deprecated; '
                    'an integer is preferred.')
        changes.append(config_changes.ConcurrencyChanges(concurrency=c))
    if 'timeout' in args and args.timeout:
        try:
            # A bare number is interpreted as seconds.
            timeout_secs = int(args.timeout)
        except ValueError:
            timeout_duration = times.ParseDuration(args.timeout)
            timeout_secs = int(timeout_duration.total_seconds)
        if timeout_secs <= 0:
            raise ArgumentError(
                'The --timeout argument must be a positive time duration.')
        changes.append(config_changes.TimeoutChanges(timeout=timeout_secs))
    if 'service_account' in args and args.service_account:
        changes.append(
            config_changes.ServiceAccountChanges(
                service_account=args.service_account))
    if _HasLabelChanges(args):
        diff = labels_util.Diff.FromUpdateArgs(args)
        if diff.MayHaveUpdates():
            changes.append(config_changes.LabelChanges(diff))
    if 'revision_suffix' in args and args.revision_suffix:
        changes.append(config_changes.RevisionNameChanges(
            args.revision_suffix))
    if 'vpc_connector' in args and args.vpc_connector:
        changes.append(config_changes.VpcConnectorChange(args.vpc_connector))
    if 'clear_vpc_connector' in args and args.clear_vpc_connector:
        changes.append(config_changes.ClearVpcConnectorChange())

    return changes
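
The repeated "'flag' in args and args.flag" guard works because argparse.Namespace implements __contains__, so flags a given command never defined are skipped safely. A minimal illustration (config_changes and the other modules are the snippet's own and are not reproduced here):

import argparse

ns = argparse.Namespace(memory='512Mi', concurrency=None)
print('memory' in ns and bool(ns.memory))            # True  -> change appended
print('concurrency' in ns and bool(ns.concurrency))  # False -> flag unset
print('cpu' in ns)                                   # False -> flag undefined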
Code example #9
def Convert(self, string):
    """Converts a duration string and returns its total seconds."""
    if not string:
        return None
    try:
        d = times.ParseDuration(
            string, default_suffix=self._default_suffix).total_seconds
        return d if self._subsecond else int(d)
    except times.Error as e:
        raise exceptions.ParseError(
            self.GetPresentationName(),
            'Failed to parse duration [{}]: {}.'.format(
                string, _SubException(e)))
Code example #10
File: submit_util.py, Project: PinTrees/novelhub
def _GetBuildTimeout():
    """Get the build timeout."""
    build_timeout = properties.VALUES.builds.timeout.Get()
    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
        timeout_str = None

    return timeout_str
Code example #11
def Parse(value):
  """Parses a duration from value and returns an integer count in parsed_unit."""
  if parsed_unit == 'ms':
    multiplier = 1000
  elif parsed_unit == 'us':
    multiplier = 1000000
  elif parsed_unit == 's':
    multiplier = 1
  else:
    raise ArgumentTypeError(
        _GenerateErrorMessage('parsed_unit must be one of s, ms, us.'))
  try:
    duration = times.ParseDuration(value, default_suffix=default_unit)
    return int(duration.total_seconds * multiplier)
  except times.Error as e:
    message = six.text_type(e).rstrip('.')
    raise ArgumentTypeError(_GenerateErrorMessage(
        'Failed to parse duration: {0}'.format(message), user_input=value))
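
With default_unit='s' and parsed_unit='ms' (free variables bound by the enclosing factory, assumed here), the arithmetic works out as:

# Parse('1.5')   -> 1.5 s  -> int(1.5  * 1000) -> 1500
# Parse('0.25s') -> 0.25 s -> int(0.25 * 1000) -> 250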
Code example #12
def _ParseSegmentTimestamp(timestamp_string):
    """Parse duration formatted segment timestamp into a Duration object.

  Assumes a string with no duration unit specified (e.g. 's' or 'm') is an
  int representing microseconds.

  Args:
    timestamp_string: str, string to convert

  Raises:
    ValueError: timestamp_string is not a properly formatted duration, not an
      int, or the int value is < 0

  Returns:
    Duration object represented by timestamp_string
  """
    # Assume timestamp_string passed as int number of microseconds if no unit
    # e.g. 4566, 100, etc.
    try:
        microseconds = int(timestamp_string)
    except ValueError:
        try:
            duration = times.ParseDuration(timestamp_string)
            if duration.total_seconds < 0:
                raise times.DurationValueError()
            return duration
        except (times.DurationSyntaxError, times.DurationValueError):
            raise ValueError(
                'Could not parse timestamp string [{}]. Timestamp must '
                'be a properly formatted duration string with time '
                'amount and units (e.g. 1m3.456s, 2m, 14.4353s)'.format(
                    timestamp_string))
    else:
        log.warning(
            "Time unit missing ('s', 'm','h') for segment timestamp [{}], "
            "parsed as microseconds.".format(timestamp_string))

    if microseconds < 0:
        raise ValueError(
            'Could not parse duration string [{}]. Timestamp must be '
            'greater than or equal to 0.'.format(timestamp_string))

    return iso_duration.Duration(microseconds=microseconds)
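
Expected behavior per the docstring (Duration values follow gcloud's iso_duration semantics):

# _ParseSegmentTimestamp('4566')   -> Duration(microseconds=4566), logs a warning
# _ParseSegmentTimestamp('1m3.4s') -> Duration of 63.4 seconds
# _ParseSegmentTimestamp('-5')     -> ValueError (negative value)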
Code example #13
File: kubernetes.py, Project: saranraju90/multik8s
def _StartMinikubeCluster(cluster_name, vm_driver, debug=False):
    """Starts a minikube cluster."""
    # pylint: disable=broad-except
    try:
        if not _IsMinikubeClusterUp(cluster_name):
            cmd = [
                _FindMinikube(),
                'start',
                '-p',
                cluster_name,
                '--keep-context',
                '--interactive=false',
                '--delete-on-failure',
                '--install-addons=false',
                '--output=json',
            ]
            if vm_driver:
                cmd.append('--vm-driver=' + vm_driver)
                if vm_driver == 'docker':
                    cmd.append('--container-runtime=docker')
            if debug:
                cmd.extend(['--alsologtostderr', '-v8'])

            start_msg = "Starting development environment '%s' ..." % cluster_name

            event_timeout = times.ParseDuration(
                properties.VALUES.code.minikube_event_timeout.Get(
                    required=True)).total_seconds

            with console_io.ProgressBar(start_msg) as progress_bar:
                for json_obj in run_subprocess.StreamOutputJson(
                        cmd, event_timeout_sec=event_timeout,
                        show_stderr=debug):
                    if debug:
                        print('minikube', json_obj)

                    _HandleMinikubeStatusEvent(progress_bar, json_obj)
    except Exception as e:
        six.reraise(MinikubeStartError, e, sys.exc_info()[2])
Code example #14
def _TokenExpiresWithinWindow(expiry_window,
                              token_expiry_time,
                              max_window_seconds=3600):
    """Determines if token_expiry_time is within expiry_window_duration.

  Calculates the amount of time between utcnow() and token_expiry_time and
  returns true if that amount is less than the provided duration window. All
  calculations are done in number of seconds for consistency.

  Args:
    expiry_window: string, Duration representing the amount of time between
      now and token_expiry_time to compare against.
    token_expiry_time: datetime, The time when token expires.
    max_window_seconds: int, Maximum size of expiry window, in seconds.

  Raises:
    ValueError: If expiry_window is invalid or can not be parsed.

  Returns:
    True if the token is expired or will expire within the provided window,
    False otherwise.
  """
    try:
        min_expiry = times.ParseDuration(expiry_window, default_suffix='s')
        if min_expiry.total_seconds > max_window_seconds:
            raise ValueError('Invalid expiry window duration [{}]: '
                             'Must be between 0s and 1h'.format(expiry_window))
    except times.Error as e:
        message = six.text_type(e).rstrip('.')
        raise ValueError('Error Parsing expiry window duration '
                         '[{}]: {}'.format(expiry_window, message))

    token_expiry_time = times.LocalizeDateTime(token_expiry_time,
                                               tzinfo=dateutil.tz.tzutc())
    window_end = times.GetDateTimePlusDuration(
        times.Now(tzinfo=dateutil.tz.tzutc()), min_expiry)

    return token_expiry_time <= window_end
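
A minimal standard-library sketch of the same window check, with the gcloud-specific duration parsing and validation dropped:

from datetime import datetime, timedelta, timezone

def token_expires_within(token_expiry, window_secs):
    # True if token_expiry falls at or before now + window.
    window_end = datetime.now(timezone.utc) + timedelta(seconds=window_secs)
    return token_expiry <= window_end

# Already-expired tokens trivially satisfy the check:
# token_expires_within(datetime.now(timezone.utc), 60) -> True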
Code example #15
    def _ExpectCreateCertificate(self,
                                 parent_name,
                                 cert_id,
                                 request_id,
                                 public_key,
                                 lifetime='P30D',
                                 reusable_config=None,
                                 subject_config=None,
                                 include_name=False,
                                 labels=None):
        reusable_config = (reusable_config or
                           self.messages.ReusableConfigWrapper())
        subject_config = subject_config or self.messages.SubjectConfig(
            subject=self.messages.Subject(),
            subjectAltName=self.messages.SubjectAltNames())
        lifetime = times.FormatDurationForJson(times.ParseDuration(lifetime))
        response = self.messages.Certificate(
            pemCertificate=self.test_cert,
            pemCertificateChain=[self.parent_cert])
        if include_name:
            response.name = '{}/certificate/{}'.format(parent_name, cert_id)

        request = self.messages.PrivatecaProjectsLocationsCertificateAuthoritiesCertificatesCreateRequest(
            certificateId=cert_id,
            requestId=request_id,
            parent=parent_name,
            certificate=self.messages.Certificate(
                lifetime=lifetime,
                config=self.messages.CertificateConfig(
                    reusableConfig=reusable_config,
                    subjectConfig=subject_config,
                    publicKey=self.messages.PublicKey(
                        type=self.messages.PublicKey.TypeValueValuesEnum.
                        PEM_RSA_KEY,
                        key=public_key)),
                labels=labels))

        self.mock_client.projects_locations_certificateAuthorities_certificates.Create.Expect(
            request=request, response=response)
Code example #16
def TransformDuration(r,
                      start='',
                      end='',
                      parts=3,
                      precision=3,
                      calendar=True,
                      unit=1,
                      undefined=''):
    """Formats the resource as an ISO 8601 duration string.

  The [ISO 8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations)
  format is: "[-]P[nY][nM][nD][T[nH][nM][n[.m]S]]". The 0 duration is "P0".
  Otherwise at least one part will always be displayed. Negative durations are
  prefixed by "-". "T" disambiguates months "P2M" to the left of "T" and minutes
  "PT5M" to the right.

  If the resource is a datetime then the duration of `resource - current_time`
  is returned.

  Args:
    r: A JSON-serializable object.
    start: The name of a start time attribute in the resource. The duration of
      the `end - start` time attributes in resource is returned. If `end` is
      not specified then the current time is used.
    end: The name of an end time attribute in the resource. Defaults to
      the current time if omitted. Ignored if `start` is not specified.
    parts: Format at most this many duration parts starting with largest
      non-zero part.
    precision: Format the last duration part with precision digits after the
      decimal point. Trailing "0" and "." are always stripped.
    calendar: Allow time units larger than hours in formatted durations if true.
      Durations specifying hours or smaller units are exact across daylight
      savings time boundaries. On by default. Use calendar=false to disable.
      For example, if `calendar=true` then at the daylight savings boundary
      2016-03-13T01:00:00 + P1D => 2016-03-14T01:00:00 but 2016-03-13T01:00:00 +
      PT24H => 2016-03-14T03:00:00. Similarly, a +P1Y duration will be inexact
      but "calendar correct", yielding the same month and day number next year,
      even in leap years.
    unit: Divide the resource numeric value by _unit_ to yield seconds.
    undefined: Returns this value if the resource is not a valid timestamp.

  Returns:
    The ISO 8601 duration string for r or undefined if r is not a duration.
  """
    try:
        parts = int(parts)
        precision = int(precision)
    except ValueError:
        return undefined
    calendar = GetBooleanArgValue(calendar)

    if start:
        # Duration of ((end or Now()) - start).

        # Get the datetime of both.
        try:
            start_datetime = times.ParseDateTime(_GetKeyValue(r, start))
            end_value = _GetKeyValue(r, end) if end else None
            if end_value:
                end_datetime = times.ParseDateTime(end_value)
            else:
                end_datetime = times.Now(tzinfo=start_datetime.tzinfo)
        except (AttributeError, ImportError, TypeError, ValueError):
            return undefined

        # Finally format the duration of the delta.
        delta = end_datetime - start_datetime
        return times.GetDurationFromTimeDelta(
            delta=delta, calendar=calendar).Format(parts=parts,
                                                   precision=precision)

    # Check if the resource is a float duration.
    try:
        duration = times.ParseDuration('PT{0}S'.format(float(r) / unit),
                                       calendar=calendar)
        return duration.Format(parts=parts, precision=precision)
    except (TypeError, ValueError):
        pass

    # Check if the resource is an ISO 8601 duration.
    try:
        duration = times.ParseDuration(r)
        return duration.Format(parts=parts, precision=precision)
    except (AttributeError, TypeError, ValueError):
        pass

    # Check if the resource is a datetime.
    try:
        start_datetime = times.ParseDateTime(r)
    except (AttributeError, ImportError, TypeError, ValueError):
        return undefined

    # Format the duration of (now - r).
    end_datetime = times.Now(tzinfo=start_datetime.tzinfo)
    delta = end_datetime - start_datetime
    return times.GetDurationFromTimeDelta(
        delta=delta, calendar=calendar).Format(parts=parts,
                                               precision=precision)
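
Plausible outputs, assuming ISO 8601 rendering by gcloud's times module (exact strings depend on its Format implementation):

# TransformDuration(90)                 -> 'PT1M30S'  (float seconds)
# TransformDuration(90000, unit=1000)   -> 'PT1M30S'  (milliseconds scaled by unit)
# TransformDuration('PT2H30M', parts=1) -> 'PT2H'     (largest part only)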
Code example #17
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get(required=True)
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.builds.timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag is not None:
      if (properties.VALUES.builds.check_tag.GetBool() and
          'gcr.io/' not in args.tag):
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      if properties.VALUES.builds.use_kaniko.GetBool():
        if args.no_cache:
          ttl = '0h'
        else:
          ttl = '{}h'.format(properties.VALUES.builds.kaniko_cache_ttl.Get())
        build_config = messages.Build(
            steps=[
                messages.BuildStep(
                    name=properties.VALUES.builds.kaniko_image.Get(),
                    args=[
                        '--destination', args.tag, '--cache', 'true',
                        '--cache-ttl', ttl
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
      else:
        if args.no_cache:
          raise c_exceptions.InvalidArgumentException(
              'no-cache',
              'Cannot specify --no-cache if builds/use_kaniko property is '
              'False')
        build_config = messages.Build(
            images=[args.tag],
            steps=[
                messages.BuildStep(
                    name='gcr.io/cloud-builders/docker',
                    args=[
                        'build', '--network', 'cloudbuild', '--no-cache', '-t',
                        args.tag, '.'
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
    elif args.config is not None:
      if args.no_cache:
        raise c_exceptions.ConflictingArgumentsException(
            '--config', '--no-cache')
      if not args.config:
        raise c_exceptions.InvalidArgumentException(
            '--config', 'Config file path must not be empty.')
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)
    else:
      raise c_exceptions.OneOfArgumentsRequiredException(
          ['--tag', '--config'],
          'Requires either a docker tag or a config file.')

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    # --no-source overrides the default --source.
    if not args.IsSpecified('source') and args.no_source:
      args.source = None

    gcs_source_staging = None
    if args.source:
      suffix = '.tgz'
      if args.source.startswith('gs://') or os.path.isfile(args.source):
        _, suffix = os.path.splitext(args.source)

      # Next, stage the source to Cloud Storage.
      staged_object = '{stamp}-{uuid}{suffix}'.format(
          stamp=times.GetTimeStampFromDateTime(times.Now()),
          uuid=uuid.uuid4().hex,
          suffix=suffix,
      )
      gcs_source_staging_dir = resources.REGISTRY.Parse(
          args.gcs_source_staging_dir, collection='storage.objects')

      # We create the bucket (if it does not exist) first. If we do an existence
      # check and then create the bucket ourselves, it would be possible for an
      # attacker to get lucky and beat us to creating the bucket. Block on this
      # creation to avoid this race condition.
      gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

      # If no bucket is specified (for the source `default_gcs_source`), check
      # that the default bucket is also owned by the project (b/33046325).
      if default_gcs_source:
        # This request returns only the buckets owned by the project.
        bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
            project=project, prefix=default_bucket_name)
        bucket_list = gcs_client.client.buckets.List(bucket_list_req)
        found_bucket = False
        for bucket in bucket_list.items:
          if bucket.id == default_bucket_name:
            found_bucket = True
            break
        if not found_bucket:
          if default_gcs_source:
            raise c_exceptions.RequiredArgumentException(
                'gcs_source_staging_dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs_source_staging_dir.'.format(default_bucket_name))

      if gcs_source_staging_dir.object:
        staged_object = gcs_source_staging_dir.object + '/' + staged_object
      gcs_source_staging = resources.REGISTRY.Create(
          collection='storage.objects',
          bucket=gcs_source_staging_dir.bucket,
          object=staged_object)

      if args.source.startswith('gs://'):
        gcs_source = resources.REGISTRY.Parse(
            args.source, collection='storage.objects')
        staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      else:
        if not os.path.exists(args.source):
          raise c_exceptions.BadFileException(
              'could not find source [{src}]'.format(src=args.source))
        if os.path.isdir(args.source):
          source_snapshot = snapshot.Snapshot(args.source,
                                              ignore_file=args.ignore_file)
          size_str = resource_transform.TransformSize(
              source_snapshot.uncompressed_size)
          log.status.Print(
              'Creating temporary tarball archive of {num_files} file(s)'
              ' totalling {size} before compression.'.format(
                  num_files=len(source_snapshot.files), size=size_str))
          staged_source_obj = source_snapshot.CopyTarballToGCS(
              gcs_client, gcs_source_staging, ignore_file=args.ignore_file)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
        elif os.path.isfile(args.source):
          unused_root, ext = os.path.splitext(args.source)
          if ext not in _ALLOWED_SOURCE_EXT:
            raise c_exceptions.BadFileException(
                'Local file [{src}] is none of {extensions}'.format(
                    src=args.source,
                    extensions=', '.join(_ALLOWED_SOURCE_EXT)))
          log.status.Print('Uploading local file [{src}] to '
                           '[gs://{bucket}/{object}].'.format(
                               src=args.source,
                               bucket=gcs_source_staging.bucket,
                               object=gcs_source_staging.object,
                           ))
          staged_source_obj = gcs_client.CopyFileToGCS(args.source,
                                                       gcs_source_staging)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
    else:
      # No source
      if not args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source', 'To omit source, use the --no-source flag.')

    if args.gcs_log_dir:
      gcs_log_dir = resources.REGISTRY.Parse(
          args.gcs_log_dir, collection='storage.objects')

      build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                 gcs_log_dir.object)

    # Machine type.
    if args.machine_type is not None:
      machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
          args.machine_type)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.machineType = machine_type

    # Disk size.
    if args.disk_size is not None:
      disk_size = compute_utils.BytesToGb(args.disk_size)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.diskSizeGb = int(disk_size)

    log.debug('submitting build: ' + repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config, projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print(
          'Logs are available at [{log_url}].'.format(log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the
    # build. ('async' is a reserved word in Python 3; the argparse dest is
    # assumed to be 'async_'.)
    if getattr(args, 'async_', False):
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
Code example #18
def ParseValidityFlag(args):
    return times.FormatDurationForJson(times.ParseDuration(args.validity))
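
Cloud APIs serialize durations in JSON as decimal seconds with an 's' suffix, so, assuming a --validity of 'P30D':

# ParseValidityFlag(args) -> '2592000s'  (30 days expressed in seconds)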
Code example #19
File: submit.py, Project: krisztinagy/master_thesis
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get()
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    default_gcs_log_dir = False
    if args.gcs_log_dir is None:
      default_gcs_log_dir = True
      args.gcs_log_dir = 'gs://{}/logs'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.container.build_timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = str(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag:
      if 'gcr.io/' not in args.tag:
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      build_config = messages.Build(
          images=[args.tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=['build', '--no-cache', '-t', args.tag, '.'],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(args.substitutions,
                                                            messages)
      )
    elif args.config:
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    suffix = '.tgz'
    if args.source.startswith('gs://') or os.path.isfile(args.source):
      _, suffix = os.path.splitext(args.source)

    # Next, stage the source to Cloud Storage.
    staged_object = '{stamp}{suffix}'.format(
        stamp=times.GetTimeStampFromDateTime(times.Now()),
        suffix=suffix,
    )
    gcs_source_staging_dir = resources.REGISTRY.Parse(
        args.gcs_source_staging_dir, collection='storage.objects')

    # We first try to create the bucket, before doing all the checks, in order
    # to avoid a race condition. If we do the check first, an attacker could
    # be lucky enough to create the bucket after the check and before this
    # bucket creation.
    gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

    # If no bucket is specified (for the source `default_gcs_source` or for the
    # logs `default_gcs_log_dir`), check that the default bucket is also owned
    # by the project (b/33046325).
    if default_gcs_source or default_gcs_log_dir:
      # This request returns only the buckets owned by the project.
      bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
          project=project,
          prefix=default_bucket_name)
      bucket_list = gcs_client.client.buckets.List(bucket_list_req)
      found_bucket = False
      for bucket in bucket_list.items:
        if bucket.id == default_bucket_name:
          found_bucket = True
          break
      if not found_bucket:
        if default_gcs_source:
          raise c_exceptions.RequiredArgumentException(
              'gcs_source_staging_dir',
              'A bucket with name {} already exists and is owned by '
              'another project. Specify a bucket using '
              '--gcs_source_staging_dir.'.format(default_bucket_name))
        elif default_gcs_log_dir:
          raise c_exceptions.RequiredArgumentException(
              'gcs-log-dir',
              'A bucket with name {} already exists and is owned by '
              'another project. Specify a bucket to hold build logs '
              'using --gcs-log-dir.'.format(default_bucket_name))

    if gcs_source_staging_dir.object:
      staged_object = gcs_source_staging_dir.object + '/' + staged_object

    gcs_source_staging = resources.REGISTRY.Create(
        collection='storage.objects',
        bucket=gcs_source_staging_dir.bucket,
        object=staged_object)

    if args.source.startswith('gs://'):
      gcs_source = resources.REGISTRY.Parse(
          args.source, collection='storage.objects')
      staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
      build_config.source = messages.Source(
          storageSource=messages.StorageSource(
              bucket=staged_source_obj.bucket,
              object=staged_source_obj.name,
              generation=staged_source_obj.generation,
          ))
    else:
      if not os.path.exists(args.source):
        raise c_exceptions.BadFileException(
            'could not find source [{src}]'.format(src=args.source))
      if os.path.isdir(args.source):
        source_snapshot = snapshot.Snapshot(args.source)
        size_str = resource_transform.TransformSize(
            source_snapshot.uncompressed_size)
        log.status.Print(
            'Creating temporary tarball archive of {num_files} file(s)'
            ' totalling {size} before compression.'.format(
                num_files=len(source_snapshot.files),
                size=size_str))
        staged_source_obj = source_snapshot.CopyTarballToGCS(
            gcs_client, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      elif os.path.isfile(args.source):
        unused_root, ext = os.path.splitext(args.source)
        if ext not in _ALLOWED_SOURCE_EXT:
          raise c_exceptions.BadFileException(
              'Local file [{src}] is none of {extensions}'.format(
                  src=args.source,
                  extensions=', '.join(_ALLOWED_SOURCE_EXT)))
        log.status.Print(
            'Uploading local file [{src}] to '
            '[gs://{bucket}/{object}].'.format(
                src=args.source,
                bucket=gcs_source_staging.bucket,
                object=gcs_source_staging.object,
            ))
        staged_source_obj = gcs_client.CopyFileToGCS(
            storage_util.BucketReference.FromBucketUrl(
                gcs_source_staging.bucket),
            args.source, gcs_source_staging.object)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))

    gcs_log_dir = resources.REGISTRY.Parse(
        args.gcs_log_dir, collection='storage.objects')

    if gcs_log_dir.bucket != gcs_source_staging.bucket:
      # Create the logs bucket if it does not yet exist.
      gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
    build_config.logsBucket = 'gs://'+gcs_log_dir.bucket+'/'+gcs_log_dir.object

    log.debug('submitting build: '+repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config,
            projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print('Logs are available at [{log_url}].'.format(
          log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the
    # build. ('async' is a reserved word in Python 3; the argparse dest is
    # assumed to be 'async_'.)
    if getattr(args, 'async_', False):
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
Code example #20
def CreateBuild(messages, build_timeout, build_and_push, staged_source, image,
                dockerfile_path, app_name, app_version, config_path, namespace,
                expose_port, gcs_config_staging_path, cluster, location,
                build_tags):
    """Creates the Cloud Build config to run.

  Args:
    messages: Cloud Build messages module. i.e., the return value of
      cloudbuild_util.GetMessagesModule().
    build_timeout: An optional maximum time a build is run before it times out.
      For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If you do
      not specify a unit, seconds is assumed. If this value is None, a timeout
      is not set.
    build_and_push: If True, the created build will have Build and Push steps.
    staged_source: An optional GCS object for a staged source repository. The
      object must have bucket, name, and generation fields. If this value is
      None, the created build will not have a source.
    image: The image that will be deployed and optionally built beforehand.
      The image can include a tag or digest.
    dockerfile_path: A path to the source repository's Dockerfile, relative
      to the source repository's root directory.
    app_name: An app name that is set to a substitution variable.
    app_version: An app version that is set to a substitution variable.
    config_path: An optional path to the source repository's Kubernetes configs,
      relative to the source repository's root directory that is set to a
      substitution variable. If this value is None, the substitution variable is
      set to '' to indicate its absence.
    namespace: A Kubernetes namespace of the cluster to deploy to that
      is set to a substitution variable.
    expose_port: An optional port that the deployed application listens to that
      is set to a substitution variable. If this value is None, the substitution
      variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this value
      is None, the substitution variable is set to '' to indicate its absence.
    cluster: The name of the target cluster to deploy to.
    location: The zone/region of the target cluster to deploy to.
    build_tags: Tags to append to build tags in addition to default tags.

  Returns:
    messages.Build, the Cloud Build config.
  """

    build = messages.Build()

    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        build.timeout = six.text_type(build_timeout_secs) + 's'

    if staged_source:
        build.source = messages.Source(storageSource=messages.StorageSource(
            bucket=staged_source.bucket,
            object=staged_source.name,
            generation=staged_source.generation))

    if config_path is None:
        config_path = ''

    if not expose_port:
        expose_port = '0'
    else:
        expose_port = str(expose_port)

    build.steps = []

    if build_and_push:
        build.steps.append(
            messages.BuildStep(id=_BUILD_BUILD_STEP_ID,
                               name='gcr.io/cloud-builders/docker',
                               args=[
                                   'build', '--network', 'cloudbuild',
                                   '--no-cache', '-t', image, '-f',
                                   '${}'.format(_DOCKERFILE_PATH_SUB_VAR), '.'
                               ]))
        build.steps.append(
            messages.BuildStep(id=_PUSH_BUILD_STEP_ID,
                               name='gcr.io/cloud-builders/docker',
                               args=[
                                   'push',
                                   image,
                               ]))

    build.steps.append(
        messages.BuildStep(
            id=_PREPARE_DEPLOY_BUILD_STEP_ID,
            name=_GKE_DEPLOY_PROD,
            args=[
                'prepare', '--filename=${}'.format(_K8S_YAML_PATH_SUB_VAR),
                '--image={}'.format(image),
                '--app=${}'.format(_APP_NAME_SUB_VAR),
                '--version={}'.format(app_version),
                '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
                '--output=output', '--annotation=gcb-build-id=$BUILD_ID',
                '--expose=${}'.format(_EXPOSE_PORT_SUB_VAR)
            ],
        ))
    build.steps.append(
        messages.BuildStep(id=_SAVE_CONFIGS_BUILD_STEP_ID,
                           name='gcr.io/cloud-builders/gsutil',
                           entrypoint='sh',
                           args=['-c', _SAVE_CONFIGS_SCRIPT]))
    build.steps.append(
        messages.BuildStep(
            id=_APPLY_DEPLOY_BUILD_STEP_ID,
            name=_GKE_DEPLOY_PROD,
            args=[
                'apply',
                '--filename=output/expanded',
                '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
                '--cluster=${}'.format(_GKE_CLUSTER_SUB_VAR),
                '--location=${}'.format(_GKE_LOCATION_SUB_VAR),
                '--timeout=24h'  # Set this to max value allowed for a build so that
                # this step never times out. We prefer the timeout given to the build
                # to take precedence.
            ],
        ))

    build.substitutions = cloudbuild_util.EncodeSubstitutions(
        _BuildSubstitutionsDict(dockerfile_path, app_name, config_path,
                                namespace, expose_port, cluster, location,
                                gcs_config_staging_path), messages)

    build.tags = _DEFAULT_BUILD_TAGS[:]
    if build_tags:
        for tag in build_tags:
            build.tags.append(tag)

    build.options = messages.BuildOptions()
    build.options.substitutionOption = messages.BuildOptions.SubstitutionOptionValueValuesEnum.ALLOW_LOOSE

    return build
Code example #21
def CreatePRPreviewBuildTrigger(messages, name, description, build_timeout,
                                github_repo_owner, github_repo_name,
                                pr_pattern, preview_expiry_days,
                                comment_control, dockerfile_path, app_name,
                                config_path, expose_port,
                                gcs_config_staging_path, cluster, location,
                                build_tags, build_trigger_tags):
    """Creates the Cloud BuildTrigger config that deploys an application when triggered by a PR create/update.

  Args:
    messages: Cloud Build messages module. This is the value returned from
      cloudbuild_util.GetMessagesModule().
    name: Trigger name, which must be unique amongst all triggers in a project.
    description: Trigger description.
    build_timeout: An optional maximum time a triggered build is run before it
      times out. For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds.
      If you do not specify a unit, seconds is assumed. If this value is None, a
      timeout is not set.
    github_repo_owner: A GitHub repo owner to be used in the trigger's github
      field.
    github_repo_name: A GitHub repo name to be used in the trigger's github
      field.
    pr_pattern: A regex value that is the base branch that the PR is targeting,
      which triggers the creation of the PR preview deployment.
    preview_expiry_days: How long a deployed preview application can exist
      before it is expired, in days, that is set to a substitution variable.
    comment_control: Whether or not a user must comment /gcbrun to trigger
      the deployment build.
    dockerfile_path: An optional path to the source repository's Dockerfile,
      relative to the source repository's root directory that is set to a
      substitution variable. If this value is not provided, 'Dockerfile' is
      used.
    app_name: An optional app name that is set to a substitution variable.
      If this value is None, the substitution variable is set to '' to indicate
      its absence.
    config_path: An optional path to the source repository's Kubernetes configs,
      relative to the source repository's root directory that is set to a
      substitution variable. If this value is None, the substitution variable is
      set to '' to indicate its absence.
    expose_port: An optional port that the deployed application listens to that
      is set to a substitution variable. If this value is None, the substitution
      variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this value
      is None, the substitution variable is set to '' to indicate its absence.
    cluster: The name of the target cluster to deploy to that is set to a
      substitution variable.
    location: The zone/region of the target cluster to deploy to that is set to
      a substitution variable.
    build_tags: Tags to append to build tags in addition to default tags.
    build_trigger_tags: Tags to append to build trigger tags in addition to
      default tags.

  Returns:
    messages.BuildTrigger, the Cloud BuildTrigger config.
  """

    substitutions = _BaseBuildSubstitutionsDict(dockerfile_path, app_name,
                                                config_path, expose_port,
                                                cluster, location,
                                                gcs_config_staging_path)
    substitutions[_PREVIEW_EXPIRY_SUB_VAR] = six.text_type(preview_expiry_days)

    build = messages.Build(
        steps=[
            _BuildBuildStep(messages, _IMAGE),
            _PushBuildStep(messages, _IMAGE),
            messages.BuildStep(id=_PREPARE_DEPLOY_BUILD_STEP_ID,
                               name=_GKE_DEPLOY_PROD,
                               entrypoint='sh',
                               args=['-c', _PREPARE_PREVIEW_DEPLOY_SCRIPT]),
            _SaveConfigsBuildStep(messages),
            messages.BuildStep(id=_APPLY_DEPLOY_BUILD_STEP_ID,
                               name=_GKE_DEPLOY_PROD,
                               entrypoint='sh',
                               args=['-c', _APPLY_PREVIEW_DEPLOY_SCRIPT]),
            messages.BuildStep(id=_ANNOTATE_PREVIEW_NAMESPACE_BUILD_STEP_ID,
                               name='gcr.io/cloud-builders/kubectl',
                               entrypoint='sh',
                               args=['-c', _ANNOTATE_PREVIEW_NAMESPACE_SCRIPT])
        ],
        substitutions=cloudbuild_util.EncodeSubstitutions(
            substitutions, messages),
        options=messages.BuildOptions(
            substitutionOption=messages.BuildOptions.
            SubstitutionOptionValueValuesEnum.ALLOW_LOOSE),
        images=[_IMAGE],
        artifacts=messages.Artifacts(
            objects=messages.ArtifactObjects(location='gs://' +
                                             _EXPANDED_CONFIGS_PATH_DYNAMIC,
                                             paths=['output/expanded/*'])))

    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        build.timeout = six.text_type(build_timeout_secs) + 's'

    build.tags = _DEFAULT_PR_PREVIEW_TAGS[:]
    if build_tags:
        for tag in build_tags:
            build.tags.append(tag)

    github_config = messages.GitHubEventsConfig(
        owner=github_repo_owner,
        name=github_repo_name,
        pullRequest=messages.PullRequestFilter(branch=pr_pattern))

    if comment_control:
        github_config.pullRequest.commentControl = messages.PullRequestFilter.CommentControlValueValuesEnum.COMMENTS_ENABLED

    build_trigger = messages.BuildTrigger(
        name=name,
        description=description,
        build=build,
        github=github_config,
        substitutions=cloudbuild_util.EncodeTriggerSubstitutions(
            substitutions, messages))

    build_trigger.tags = _DEFAULT_PR_PREVIEW_TAGS[:]
    if build_trigger_tags:
        for tag in build_trigger_tags:
            build_trigger.tags.append(tag)

    return build_trigger
Code example #22
    def _CreateBuildFromArgs(self, args, messages):
        """Creates the Cloud Build config from the arguments.

    Args:
      args: argsparse object from the DeployGKE command.
      messages: Cloud Build messages module.

    Returns:
      messages.Build, the Cloud Build config.
    """
        build = messages.Build(steps=[], tags=_CLOUD_BUILD_DEPLOY_TAGS)

        if args.app_name:
            build.tags.append(args.app_name)

        build_timeout = properties.VALUES.builds.timeout.Get()

        if build_timeout is not None:
            try:
                # A bare number is interpreted as seconds.
                build_timeout_secs = int(build_timeout)
            except ValueError:
                build_timeout_duration = times.ParseDuration(build_timeout)
                build_timeout_secs = int(build_timeout_duration.total_seconds)
            build.timeout = six.text_type(build_timeout_secs) + 's'

        if args.source is None:
            if args.tag or args.tag_default:
                raise c_exceptions.RequiredArgumentException(
                    'SOURCE',
                    'required to build container image provided by --tag or --tag-default.'
                )
            if args.config:
                raise c_exceptions.RequiredArgumentException(
                    'SOURCE',
                    'required because --config is a relative path in the '
                    'source directory.')

        if args.source and args.image and not args.config:
            raise c_exceptions.InvalidArgumentException(
                'SOURCE', 'Source should not be provided when no Kubernetes '
                'configs and no docker builds are required.')

        if args.tag_default:
            if args.app_name:
                default_name = args.app_name
            elif os.path.isdir(args.source):
                default_name = os.path.basename(os.path.abspath(args.source))
            else:
                raise c_exceptions.InvalidArgumentException(
                    '--tag-default',
                    'No default container image name available. Please provide an '
                    'app name with --app-name, or provide a valid --tag.')

            if args.app_version:
                default_tag = args.app_version
            elif git.IsGithubRepository(
                    args.source) and not git.HasPendingChanges(args.source):
                default_tag = git.GetShortGitHeadRevision(args.source)
                if not default_tag:
                    raise c_exceptions.InvalidArgumentException(
                        '--tag-default',
                        'No default tag available, no commit sha at HEAD of source '
                        'repository available for tag. Please provide an app version '
                        'with --app-version, or provide a valid --tag.')
            else:
                raise c_exceptions.InvalidArgumentException(
                    '--tag-default',
                    'No default container image tag available. Please provide an app '
                    'version with --app-version, or provide a valid --tag.')

            args.tag = 'gcr.io/$PROJECT_ID/{name}:{tag}'.format(
                name=default_name, tag=default_tag)

        if args.tag:
            if (properties.VALUES.builds.check_tag.GetBool()
                    and 'gcr.io/' not in args.tag):
                raise c_exceptions.InvalidArgumentException(
                    '--tag',
                    'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.'
                )
            build.steps.append(
                messages.BuildStep(
                    name='gcr.io/cloud-builders/docker',
                    args=[
                        'build', '--network', 'cloudbuild', '--no-cache', '-t',
                        args.tag, '.'
                    ],
                ))
            build.steps.append(
                messages.BuildStep(name='gcr.io/cloud-builders/docker',
                                   args=['push', args.tag]))

        if args.image and (properties.VALUES.builds.check_tag.GetBool()
                           and 'gcr.io/' not in args.image):
            raise c_exceptions.InvalidArgumentException(
                '--image',
                'Image value must be in the gcr.io/* or *.gcr.io/* namespace.')

        if args.expose and args.expose < 0:
            raise c_exceptions.InvalidArgumentException(
                'EXPOSE', 'port number is invalid')

        self._StageSourceAndConfigFiles(args, messages, build)

        image = args.image if args.image else args.tag

        deploy_step = messages.BuildStep(
            name=_GKE_DEPLOY_PROD,
            args=[
                'run',
                '--image={}'.format(image),
                '--cluster={}'.format(args.cluster),
                '--location={}'.format(args.location),
                '--namespace={}'.format(args.namespace),
                '--output=output',
                '--label=gcb-build-id=$BUILD_ID',
            ],
        )
        image_name = image.split('/')[-1]
        image_with_digest = image_name.split('@')
        image_with_tag = image_name.split(':')
        if args.app_name:
            deploy_step.args.append('--app={}'.format(args.app_name))
        else:
            if len(image_with_digest) > 1:
                deploy_step.args.append('--app={}'.format(
                    image_with_digest[0]))
            else:
                deploy_step.args.append('--app={}'.format(image_with_tag[0]))

        if args.app_version:
            deploy_step.args.append('--version={}'.format(args.app_version))
        elif len(image_with_digest) == 1 and len(image_with_tag) > 1:
            deploy_step.args.append('--version={}'.format(image_with_tag[1]))
        elif args.source:
            if git.IsGithubRepository(
                    args.source) and not git.HasPendingChanges(args.source):
                short_sha = git.GetShortGitHeadRevision(args.source)
                if short_sha:
                    deploy_step.args.append('--version={}'.format(short_sha))

        if args.config:
            deploy_step.args.append('--filename={}'.format(args.config))
        if args.expose:
            deploy_step.args.append('--expose={}'.format(args.expose))
        if build.timeout is not None:
            deploy_step.args.append('--timeout={}'.format(build.timeout))

        # Insert the deploy step before the trailing gsutil copy step.
        build.steps.insert(-1, deploy_step)
        return build
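
Note that list.insert(-1, ...) places the new element before the last one, which is how the deploy step lands ahead of the trailing gsutil copy step added by _StageSourceAndConfigFiles. A minimal illustration:

steps = ['docker build', 'docker push', 'gsutil copy']
steps.insert(-1, 'gke-deploy run')
# steps == ['docker build', 'docker push', 'gke-deploy run', 'gsutil copy']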
Code example #23
def _ConvertProtoToIsoDuration(proto_duration_str):
    """Convert a given 'proto duration' string to an ISO8601 duration string."""
    return times.FormatDuration(times.ParseDuration(proto_duration_str, True))
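
A proto duration string is a decimal count of seconds with an 's' suffix (for example '90s'); the helper above re-renders it as an ISO 8601 duration. A hedged illustration of the expected shape (exact output depends on the SDK's FormatDuration defaults):

# Illustrative only; assumes the googlecloudsdk package is importable.
# _ConvertProtoToIsoDuration('90s')    -> 'PT1M30S'
# _ConvertProtoToIsoDuration('3600s')  -> 'PT1H'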
Code example #24
File: flags.py Project: saranraju90/multik8s
def ParseValidityFlag(args):
  """Parses the validity from args."""
  return times.FormatDurationForJson(times.ParseDuration(args.validity))
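
FormatDurationForJson goes the opposite direction from the previous example: it renders a parsed duration in the JSON proto form, i.e. a count of seconds with an 's' suffix. A hedged illustration, assuming args.validity carries an ISO 8601 duration:

# Illustrative only; 30 days = 2,592,000 seconds.
# args.validity == 'P30D'  ->  ParseValidityFlag(args) == '2592000s'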
Code example #25
File: submit_util.py Project: bopopescu/GoogleAPI
def CreateBuildConfig(tag, no_cache, messages, substitutions, arg_config,
                      is_specified_source, no_source, source,
                      gcs_source_staging_dir, ignore_file, arg_gcs_log_dir,
                      arg_machine_type, arg_disk_size):
    """Returns a build config."""
    # Get the build timeout.
    build_timeout = properties.VALUES.builds.timeout.Get()
    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
        timeout_str = None

    if tag is not None:
        if (properties.VALUES.builds.check_tag.GetBool()
                and 'gcr.io/' not in tag):
            raise c_exceptions.InvalidArgumentException(
                '--tag',
                'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
        if properties.VALUES.builds.use_kaniko.GetBool():
            if no_cache:
                ttl = '0h'
            else:
                ttl = '{}h'.format(
                    properties.VALUES.builds.kaniko_cache_ttl.Get())
            build_config = messages.Build(
                steps=[
                    messages.BuildStep(
                        name=properties.VALUES.builds.kaniko_image.Get(),
                        args=[
                            '--destination',
                            tag,
                            '--cache',
                            '--cache-ttl',
                            ttl,
                            '--cache-dir',
                            '',
                        ],
                    ),
                ],
                timeout=timeout_str,
                substitutions=cloudbuild_util.EncodeSubstitutions(
                    substitutions, messages))
        else:
            if no_cache:
                raise c_exceptions.InvalidArgumentException(
                    'no-cache',
                    'Cannot specify --no-cache if builds/use_kaniko property is '
                    'False')
            build_config = messages.Build(
                images=[tag],
                steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        args=[
                            'build', '--network', 'cloudbuild', '--no-cache',
                            '-t', tag, '.'
                        ],
                    ),
                ],
                timeout=timeout_str,
                substitutions=cloudbuild_util.EncodeSubstitutions(
                    substitutions, messages))
    elif arg_config is not None:
        if no_cache:
            raise c_exceptions.ConflictingArgumentsException(
                '--config', '--no-cache')
        if not arg_config:
            raise c_exceptions.InvalidArgumentException(
                '--config', 'Config file path must not be empty.')
        build_config = config.LoadCloudbuildConfigFromPath(
            arg_config, messages, params=substitutions)
    else:
        raise c_exceptions.OneOfArgumentsRequiredException(
            ['--tag', '--config'],
            'Requires either a docker tag or a config file.')

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
        build_config.timeout = timeout_str

    # Set the source for the build config.
    default_gcs_source = False
    default_bucket_name = None
    if gcs_source_staging_dir is None:
        default_gcs_source = True
        default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
        gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
    gcs_client = storage_api.StorageClient()

    # --no-source overrides the default --source.
    if not is_specified_source and no_source:
        source = None

    gcs_source_staging = None
    if source:
        suffix = '.tgz'
        if source.startswith('gs://') or os.path.isfile(source):
            _, suffix = os.path.splitext(source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}-{uuid}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            uuid=uuid.uuid4().hex,
            suffix=suffix,
        )
        gcs_source_staging_dir = resources.REGISTRY.Parse(
            gcs_source_staging_dir, collection='storage.objects')

        # We create the bucket (if it does not exist) first. If we do an existence
        # check and then create the bucket ourselves, it would be possible for an
        # attacker to get lucky and beat us to creating the bucket. Block on this
        # creation to avoid this race condition.
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

        # If no bucket is specified (for the source `default_gcs_source`), check
        # that the default bucket is also owned by the project (b/33046325).
        if default_gcs_source and not staging_bucket_util.BucketIsInProject(
                gcs_client, default_bucket_name):
            raise c_exceptions.RequiredArgumentException(
                'gcs-source-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-source-staging-dir.'.format(default_bucket_name))

        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object
        gcs_source_staging = resources.REGISTRY.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if source.startswith('gs://'):
            gcs_source = resources.REGISTRY.Parse(source,
                                                  collection='storage.objects')
            staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                   gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=source))
            if os.path.isdir(source):
                source_snapshot = snapshot.Snapshot(source,
                                                    ignore_file=ignore_file)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.Print(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging, ignore_file=ignore_file)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(source):
                unused_root, ext = os.path.splitext(source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {extensions}'.format(
                            src=source,
                            extensions=', '.join(_ALLOWED_SOURCE_EXT)))
                log.status.Print('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}].'.format(
                                     src=source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    source, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
    else:
        # No source
        if not no_source:
            raise c_exceptions.InvalidArgumentException(
                '--no-source', 'To omit source, use the --no-source flag.')

    # Set a Google Cloud Storage directory to hold build logs.
    if arg_gcs_log_dir:
        gcs_log_dir = resources.REGISTRY.Parse(arg_gcs_log_dir,
                                               collection='storage.objects')
        build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                   gcs_log_dir.object)

    # Set the machine type used to run the build.
    if arg_machine_type is not None:
        machine_type = flags.GetMachineType(arg_machine_type)
        if not build_config.options:
            build_config.options = messages.BuildOptions()
        build_config.options.machineType = machine_type

    # Set the disk size used to run the build.
    if arg_disk_size is not None:
        disk_size = compute_utils.BytesToGb(arg_disk_size)
        if not build_config.options:
            build_config.options = messages.BuildOptions()
        build_config.options.diskSizeGb = int(disk_size)

    return build_config
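
A hedged invocation sketch for CreateBuildConfig, assuming a Cloud SDK environment; every resource name below is a placeholder, and note that the call stages the source to GCS as a side effect:

from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util

messages = cloudbuild_util.GetMessagesModule()
build_config = CreateBuildConfig(
    tag='gcr.io/my-project/my-app',  # placeholder image tag
    no_cache=False,
    messages=messages,
    substitutions=None,
    arg_config=None,  # --tag and --config are mutually exclusive here
    is_specified_source=True,
    no_source=False,
    source='.',  # stage the current directory
    gcs_source_staging_dir=None,  # fall back to the default staging bucket
    ignore_file=None,
    arg_gcs_log_dir=None,
    arg_machine_type=None,
    arg_disk_size=None)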
Code example #26
def GetConfigurationChanges(args):
  """Returns a list of changes to Configuration, based on the flags set."""
  changes = []
  changes.extend(_GetScalingChanges(args))
  if _HasEnvChanges(args):
    changes.append(_GetEnvChanges(args))

  if _HasTrafficChanges(args):
    changes.append(_GetTrafficChanges(args))

  if _HasCloudSQLChanges(args):
    region = GetRegion(args)
    project = (
        getattr(args, 'project', None) or
        properties.VALUES.core.project.Get(required=True))
    _CheckCloudSQLApiEnablement()
    changes.append(config_changes.CloudSQLChanges(project, region, args))

  if _HasSecretsChanges(args):
    changes.extend(_GetSecretsChanges(args))

  if _HasConfigMapsChanges(args):
    changes.extend(_GetConfigMapsChanges(args))

  if 'no_traffic' in args and args.no_traffic:
    changes.append(config_changes.NoTrafficChange())

  if 'cpu' in args and args.cpu:
    changes.append(config_changes.ResourceChanges(cpu=args.cpu))
  if 'memory' in args and args.memory:
    changes.append(config_changes.ResourceChanges(memory=args.memory))
  if 'concurrency' in args and args.concurrency:
    changes.append(config_changes.ConcurrencyChanges(
        concurrency=args.concurrency))
  if 'timeout' in args and args.timeout:
    try:
      # A bare number is interpreted as seconds.
      timeout_secs = int(args.timeout)
    except ValueError:
      timeout_duration = times.ParseDuration(args.timeout)
      timeout_secs = int(timeout_duration.total_seconds)
    if timeout_secs <= 0:
      raise ArgumentError(
          'The --timeout argument must be a positive time duration.')
    changes.append(config_changes.TimeoutChanges(timeout=timeout_secs))
  if 'service_account' in args and args.service_account:
    changes.append(
        config_changes.ServiceAccountChanges(
            service_account=args.service_account))
  if _HasLabelChanges(args):
    additions = (
        args.labels
        if _FlagIsExplicitlySet(args, 'labels') else args.update_labels)
    diff = labels_util.Diff(
        additions=additions,
        subtractions=args.remove_labels,
        clear=args.clear_labels)
    if diff.MayHaveUpdates():
      changes.append(config_changes.LabelChanges(diff))
  if 'revision_suffix' in args and args.revision_suffix:
    changes.append(config_changes.RevisionNameChanges(args.revision_suffix))
  if 'vpc_connector' in args and args.vpc_connector:
    changes.append(config_changes.VpcConnectorChange(args.vpc_connector))
  if 'clear_vpc_connector' in args and args.clear_vpc_connector:
    changes.append(config_changes.ClearVpcConnectorChange())
  if 'connectivity' in args and args.connectivity:
    if args.connectivity == 'internal':
      changes.append(config_changes.EndpointVisibilityChange(True))
    elif args.connectivity == 'external':
      changes.append(config_changes.EndpointVisibilityChange(False))
  if 'command' in args and args.command is not None:
    # Allow passing an empty string here to reset the field
    changes.append(config_changes.ContainerCommandChange(args.command))
  if 'args' in args and args.args is not None:
    # Allow passing an empty string here to reset the field
    changes.append(config_changes.ContainerArgsChange(args.args))
  if _FlagIsExplicitlySet(args, 'port'):
    changes.append(config_changes.ContainerPortChange(port=args.port))
  if _FlagIsExplicitlySet(args, 'use_http2'):
    changes.append(config_changes.ContainerPortChange(use_http2=args.use_http2))
  return changes
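
The small helpers referenced above are not shown in this example. A plausible sketch of _FlagIsExplicitlySet, assuming args is a parsed calliope namespace that supports membership tests and IsSpecified():

def _FlagIsExplicitlySet(args, flag):
  """Returns True if --flag was explicitly provided on the command line."""
  # Hypothetical reconstruction; IsSpecified() distinguishes flags the user
  # actually passed from ones that merely carry default values.
  return flag in args and args.IsSpecified(flag)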
Code example #27
def CreateBuild(messages, build_timeout, build_and_push, staged_source, image,
                dockerfile_path, app_name, app_version, config_path, namespace,
                expose_port, gcs_config_staging_path, cluster, location,
                build_tags):
    """Creates the Cloud Build config to run.

  Args:
    messages: Cloud Build messages module. This is the value returned from
      cloudbuild_util.GetMessagesModule().
    build_timeout: An optional maximum time a build is run before it times out.
      For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If you do
      not specify a unit, seconds is assumed. If this value is None, a timeout
      is not set.
    build_and_push: If True, the created build will have Build and Push steps.
    staged_source: An optional GCS object for a staged source repository. The
      object must have bucket, name, and generation fields. If this value is
      None, the created build will not have a source.
    image: The image that will deployed and optionally built beforehand. The
      image can include a tag or digest.
    dockerfile_path: An optional path to the source repository's Dockerfile,
      relative to the source repository's root directory. If this value is not
      provided, 'Dockerfile' is used.
    app_name: An optional app name that is set to a substitution variable.
      If this value is None, the substitution variable is set to '' to indicate
      its absence.
    app_version: A app version that is set to the deployed application's
      version. If this value is None, the version will be set to '' to indicate
      its absence.
    config_path: An optional path to the source repository's Kubernetes configs,
      relative to the source repository's root directory that is set to a
      substitution variable. If this value is None, the substitution variable is
      set to '' to indicate its absence.
    namespace: An optional Kubernetes namespace of the cluster to deploy to that
      is set to a substitution variable. If this value is None, the substitution
      variable is set to 'default'.
    expose_port: An optional port that the deployed application listens to that
      is set to a substitution variable. If this value is None, the substitution
      variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this value
      is None, the substitution variable is set to '' to indicate its absence.
    cluster: The name of the target cluster to deploy to.
    location: The zone/region of the target cluster to deploy to.
    build_tags: Tags to append to build tags in addition to default tags.

  Returns:
    messages.Build, the Cloud Build config.
  """

    build = messages.Build()

    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        build.timeout = six.text_type(build_timeout_secs) + 's'

    if staged_source:
        build.source = messages.Source(storageSource=messages.StorageSource(
            bucket=staged_source.bucket,
            object=staged_source.name,
            generation=staged_source.generation))

    if config_path is None:
        config_path = ''

    if not expose_port:
        expose_port = '0'
    else:
        expose_port = six.text_type(expose_port)
    if app_version is None:
        app_version = ''

    build.steps = []

    if build_and_push:
        build.steps.append(_BuildBuildStep(messages, image))
        build.steps.append(_PushBuildStep(messages, image))

    build.steps.append(
        messages.BuildStep(
            id=_PREPARE_DEPLOY_BUILD_STEP_ID,
            name=_GKE_DEPLOY_PROD,
            args=[
                'prepare',
                '--filename=${}'.format(_K8S_YAML_PATH_SUB_VAR),
                '--image={}'.format(image),
                '--app=${}'.format(_APP_NAME_SUB_VAR),
                '--version={}'.format(app_version),
                '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
                '--output=output',
                '--annotation=gcb-build-id=$BUILD_ID,${}'.format(
                    _K8S_ANNOTATIONS_SUB_VAR
                ),  # You cannot embed a substitution
                # variable in another, so gcb-build-id=$BUILD_ID must be hard-coded.
                '--expose=${}'.format(_EXPOSE_PORT_SUB_VAR)
            ],
        ))
    build.steps.append(_SaveConfigsBuildStep(messages))
    build.steps.append(
        messages.BuildStep(
            id=_APPLY_DEPLOY_BUILD_STEP_ID,
            name=_GKE_DEPLOY_PROD,
            args=[
                'apply',
                '--filename=output/expanded',
                '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
                '--cluster=${}'.format(_GKE_CLUSTER_SUB_VAR),
                '--location=${}'.format(_GKE_LOCATION_SUB_VAR),
                '--timeout=24h'  # Set this to max value allowed for a build so that
                # this step never times out. We prefer the timeout given to the build
                # to take precedence.
            ],
        ))

    substitutions = _BaseBuildSubstitutionsDict(dockerfile_path, app_name,
                                                config_path, expose_port,
                                                cluster, location,
                                                gcs_config_staging_path)
    if namespace is None:
        namespace = 'default'
    substitutions[_K8S_NAMESPACE_SUB_VAR] = namespace

    build.substitutions = cloudbuild_util.EncodeSubstitutions(
        substitutions, messages)

    build.tags = _DEFAULT_TAGS[:]
    if build_tags:
        for tag in build_tags:
            build.tags.append(tag)

    build.options = messages.BuildOptions()
    build.options.substitutionOption = (
        messages.BuildOptions.SubstitutionOptionValueValuesEnum.ALLOW_LOOSE)

    if build_and_push:
        build.images = [image]

    build.artifacts = messages.Artifacts(
        objects=messages.ArtifactObjects(location='gs://' +
                                         _EXPANDED_CONFIGS_PATH_DYNAMIC,
                                         paths=['output/expanded/*']))

    return build
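
A hedged invocation sketch for CreateBuild, assuming a Cloud SDK environment; all resource names below are placeholders:

from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util

messages = cloudbuild_util.GetMessagesModule()
build = CreateBuild(
    messages,
    build_timeout='2h15m5s',  # normalized to '8105s'
    build_and_push=True,
    staged_source=None,  # no source attached in this sketch
    image='gcr.io/my-project/my-app:v1',
    dockerfile_path='Dockerfile',
    app_name='my-app',
    app_version='v1',
    config_path=None,
    namespace=None,  # substitution defaults to 'default'
    expose_port=None,
    gcs_config_staging_path='my-bucket/config',
    cluster='my-cluster',
    location='us-central1-c',
    build_tags=None)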