Code example #1
 def ParseFutureDatetime(s):
     """Parses a string value into a future Datetime object."""
     dt = arg_parsers.Datetime.Parse(s)
     if dt < times.Now():
         raise arg_parsers.ArgumentTypeError(
             'Date/time must be in the future: {0}'.format(s))
     return dt
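For readers without the googlecloudsdk helpers, a minimal sketch of the same pattern using only the standard library (argparse.ArgumentTypeError stands in for arg_parsers.ArgumentTypeError, and datetime.now(timezone.utc) for times.Now(); the flag name is made up for illustration):

import argparse
from datetime import datetime, timezone

def parse_future_datetime(s):
    """Parses an ISO 8601 string and rejects values that are not in the future."""
    dt = datetime.fromisoformat(s)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)  # assume UTC for naive input
    if dt < datetime.now(timezone.utc):
        raise argparse.ArgumentTypeError(
            'Date/time must be in the future: {0}'.format(s))
    return dt

parser = argparse.ArgumentParser()
parser.add_argument('--expire-at', type=parse_future_datetime)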
Code example #2
    def Run(self, args):
        """Run the list command."""
        if args.include_expired:
            # The (deprecated, hidden) --include-expired argument is equivalent to
            # --include-inactive=unlimited
            log.warn(
                'The --include-expired flag has been deprecated. Please use '
                '--include-inactive=unlimited instead.')
            args.include_inactive = None
        project_id = properties.VALUES.core.project.Get(required=True)
        debugger = debug.Debugger(project_id)
        debuggee = debugger.FindDebuggee(args.target)
        logpoints = debuggee.ListBreakpoints(
            args.id_or_location_regexp,
            include_all_users=args.all_users,
            include_inactive=(args.include_inactive != 0),
            restrict_to_type=debugger.LOGPOINT_TYPE)

        # Filter any results more than include_inactive seconds old.
        # include_inactive may be None, which means we do not want to filter the
        # results.
        if args.include_inactive is not None and args.include_inactive > 0:
            cutoff_time = (times.Now(times.UTC) -
                           datetime.timedelta(seconds=args.include_inactive))
            logpoints = [
                lp for lp in logpoints if _ShouldInclude(lp, cutoff_time)
            ]

        return logpoints
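The _ShouldInclude helper is not part of this listing. A hypothetical sketch, inferred only from how it is called here (keep active logpoints, drop inactive ones whose final time is older than the cutoff):

def _ShouldInclude(logpoint, cutoff_time):
    """Hypothetical filter: keep active logpoints and recently finalized ones."""
    if not logpoint.isFinalState or not logpoint.finalTime:
        return True
    final_time = times.ParseDateTime(logpoint.finalTime)
    return final_time >= cutoff_time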
Code example #3
def ParseExpireTime(expiration_value):
  """Parse flag value into Datetime format for expireTime."""
  # expiration_value could be in Datetime format or Duration format.
  expire_dt = (
      times.ParseDuration(expiration_value).GetRelativeDateTime(
          times.Now(times.UTC)))
  parsed_datetime = times.FormatDateTime(
      expire_dt, '%Y-%m-%dT%H:%M:%S.%6f%Ez', tzinfo=times.UTC)
  return parsed_datetime
Code example #4
File: triggers.py  Project: PinTrees/novelhub
def RecentlyModified(update_time):
    """Checks if the trigger with the given update_time was recently modified.

  Args:
    update_time: str, the time when the trigger was last modified.

  Returns:
    True if the trigger was recently modified and might not be ready for use.
  """
    update_dt = times.ParseDateTime(update_time)
    max_duration = iso_duration.Duration(minutes=MAX_READY_LATENCY_MINUTES)
    ready_dt = times.GetDateTimePlusDuration(update_dt, max_duration)
    return times.Now() < ready_dt
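A stand-alone equivalent of this readiness check, using only datetime and an assumed value for MAX_READY_LATENCY_MINUTES, might look like this (it expects an RFC 3339 timestamp with an explicit UTC offset):

from datetime import datetime, timedelta, timezone

MAX_READY_LATENCY_MINUTES = 10  # assumed value, for illustration only

def recently_modified(update_time):
    """Returns True if update_time is within the readiness latency window."""
    update_dt = datetime.fromisoformat(update_time.replace('Z', '+00:00'))
    ready_dt = update_dt + timedelta(minutes=MAX_READY_LATENCY_MINUTES)
    return datetime.now(timezone.utc) < ready_dt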
Code example #5
    def _WriteAgentLogs():
        """Writes logs from the GKE Connect agent pod to a temporary file."""
        logs, err = kube_client.Logs(namespace, agent_pod_name)
        if err:
            log.warning('Could not fetch agent pod logs: {}'.format(err))
            return

        _, tmp_file = tempfile.mkstemp(
            suffix='_{}.log'.format(times.Now().strftime('%Y%m%d-%H%M%S')),
            prefix='gke_connect_',
        )
        files.WriteFileContents(tmp_file, logs, private=True)
        log.status.Print('GKE Connect pod logs saved to [{}]'.format(tmp_file))
Code example #6
def GenerateCertId():
    """Generate a certificate id with the date and two length 3 alphanum strings.

  E.G. YYYYMMDD-ABC-DEF.

  Returns:
    The generated certificate id string.
  """
    alphanum = string.ascii_uppercase + string.digits
    alphanum_rand1 = ''.join(random.choice(alphanum) for i in range(3))
    alphanum_rand2 = ''.join(random.choice(alphanum) for i in range(3))
    date_str = times.FormatDateTime(times.Now(), '%Y%m%d')
    return '{}-{}-{}'.format(date_str, alphanum_rand1, alphanum_rand2)
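As the comment in code example #13 below notes, the default random.choice may not be seeded strongly enough for collision-sensitive identifiers; that example switches to random.SystemRandom. A comparable sketch using the standard secrets module (illustrative only, not the gcloud implementation):

import secrets
import string
from datetime import datetime, timezone

def generate_cert_id():
    """Builds a YYYYMMDD-ABC-DEF style id from a cryptographically strong RNG."""
    alphanum = string.ascii_uppercase + string.digits
    part1 = ''.join(secrets.choice(alphanum) for _ in range(3))
    part2 = ''.join(secrets.choice(alphanum) for _ in range(3))
    date_str = datetime.now(timezone.utc).strftime('%Y%m%d')
    return '{}-{}-{}'.format(date_str, part1, part2)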
Code example #7
    def testListDefaultSort(self):
        # Verify that list is sorted by default with active logpoints first,
        # inactive logpoints next, and both groups sorted by creation time.
        logpoints = [
            self.messages.Breakpoint(
                id='dummy-id-{0}'.format(i),
                action=self.messages.Breakpoint.ActionValueValuesEnum.LOG,
                logLevel=self.messages.Breakpoint.LogLevelValueValuesEnum.INFO,
                logMessageFormat='message',
                location=self.messages.SourceLocation(path='myfile', line=i))
            for i in range(0, 10)
        ]
        active_order = [7, 9, 0, 4, 2, 5]
        inactive_order = [1, 8, 6, 3]
        now = times.Now(times.UTC)
        self.StartObjectPatch(times, 'Now', return_value=now)
        base_time = now - datetime.timedelta(hours=1, minutes=1)
        offset_sec = 0
        for i in active_order:
            logpoints[i].createTime = (
                base_time +
                datetime.timedelta(seconds=offset_sec)).isoformat()
            offset_sec += 1
        offset_sec = 0
        for i in inactive_order:
            logpoints[i].isFinalState = True
            logpoints[i].createTime = (
                base_time +
                datetime.timedelta(seconds=offset_sec)).isoformat()
            logpoints[i].finalTime = now.isoformat()
            offset_sec += 1

        logpoints = [self.debuggee.AddTargetInfo(lp) for lp in logpoints]

        list_mock = self.StartObjectPatch(debug.Debuggee,
                                          'ListBreakpoints',
                                          return_value=logpoints)
        self.RunDebug(['logpoints', 'list'])
        list_mock.assert_called_once_with(
            None,
            resource_ids=[],
            include_all_users=True,
            include_inactive=True,
            restrict_to_type=debug.Debugger.LOGPOINT_TYPE)

        # Look for all the IDs in the expected order. Remove '\n' characters
        # from the output because '.' doesn't match newline, and '(.|\s)' causes
        # the Python re.search function to time out.
        self.AssertOutputMatches('.*'.join(
            logpoints[i].id for i in active_order + inactive_order),
                                 actual_filter=lambda s: s.replace('\n', ' '))
Code example #8
def GetSSHKeyExpirationFromArgs(args):
    """Converts flags to an ssh key expiration in datetime and micros."""
    if args.ssh_key_expiration:
        # this argument is checked in ParseFutureDatetime to be sure that it
        # is not already expired.  I.e. the expiration should be in the future.
        expiration = args.ssh_key_expiration
    elif args.ssh_key_expire_after:
        expiration = times.Now() + datetime.timedelta(
            seconds=args.ssh_key_expire_after)
    else:
        return None, None

    expiration_micros = times.GetTimeStampFromDateTime(expiration) * 1e6
    return expiration, int(expiration_micros)
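The micros value is simply the expiration expressed as microseconds since the Unix epoch. A stand-alone sketch of the same conversion, assuming timezone-aware datetimes and hypothetical parameter names:

from datetime import datetime, timedelta, timezone

def ssh_key_expiration(expire_at=None, expire_after_seconds=None):
    """Returns (expiration datetime, expiration in epoch microseconds) or (None, None)."""
    if expire_at is not None:
        expiration = expire_at  # assumed to be timezone-aware
    elif expire_after_seconds is not None:
        expiration = datetime.now(timezone.utc) + timedelta(
            seconds=expire_after_seconds)
    else:
        return None, None
    return expiration, int(expiration.timestamp() * 1e6)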
Code example #9
def _PrepareSSHKeysValue(ssh_keys):
    """Returns a string appropriate for the metadata.

  Expired SSH keys are always removed.
  Then values are taken from the tail until either all values are taken or
  _MAX_METADATA_VALUE_SIZE_IN_BYTES is reached, whichever comes first. The
  selected values are then reversed. Only values at the head of the list will be
  subject to removal.

  Args:
    ssh_keys: A list of keys. Each entry should be one key.

  Returns:
    A new-line-joined string of SSH keys.
  """
    keys = []
    bytes_consumed = 0

    now = times.LocalizeDateTime(times.Now(), times.UTC)

    for key in reversed(ssh_keys):
        try:
            expiration = _SSHKeyExpiration(key)
            expired = expiration is not None and expiration < now
            if expired:
                continue
        except (ValueError, times.DateTimeSyntaxError,
                times.DateTimeValueError) as exc:
            # Unable to get expiration, so treat it like it is unexpiring.
            log.warning(
                'Treating {0!r} as unexpiring, since unable to parse: {1}'.
                format(key, exc))

        num_bytes = len(key + '\n')
        if bytes_consumed + num_bytes > constants.MAX_METADATA_VALUE_SIZE_IN_BYTES:
            prompt_message = (
                'The following SSH key will be removed from your '
                'project because your SSH keys metadata value has '
                'reached its maximum allowed size of {0} bytes: {1}')
            prompt_message = prompt_message.format(
                constants.MAX_METADATA_VALUE_SIZE_IN_BYTES, key)
            console_io.PromptContinue(message=prompt_message,
                                      cancel_on_no=True)
        else:
            keys.append(key)
            bytes_consumed += num_bytes

    keys.reverse()
    return '\n'.join(keys)
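Stripped of the expiry parsing and the interactive prompt, the selection policy described in the docstring (keep as many keys as fit in the byte budget, counted from the tail, then restore the original order) can be sketched as:

def select_keys_within_budget(ssh_keys, max_bytes):
    """Keeps the newest keys (tail of the list) that fit within max_bytes."""
    kept = []
    consumed = 0
    for key in reversed(ssh_keys):
        size = len(key + '\n')
        if consumed + size > max_bytes:
            continue  # the real command prompts before dropping such a key
        kept.append(key)
        consumed += size
    kept.reverse()
    return '\n'.join(kept)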
Code example #10
File: transforms.py  Project: sudocams/tech-club
def ParseExpireTime(s):
    """Return timedelta TTL for a cluster.

  Args:
    s: expireTime string timestamp in RFC3339 format.
  Returns:
    datetime.timedelta of time remaining before cluster expiration.
  Raises:
    TypeError, ValueError if time could not be parsed.
  """
    if not s:
        return None
    expire_dt = times.ParseDateTime(s)
    if not expire_dt:
        return None
    return expire_dt - times.Now(expire_dt.tzinfo)
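The same remaining-TTL computation with only the standard library, assuming expireTime carries an explicit offset such as a trailing Z (illustrative sketch):

from datetime import datetime

def time_until_expiry(expire_time):
    """Returns a timedelta until expire_time, or None if it is empty."""
    if not expire_time:
        return None
    expire_dt = datetime.fromisoformat(expire_time.replace('Z', '+00:00'))
    return expire_dt - datetime.now(expire_dt.tzinfo)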
Code example #11
File: list.py  Project: barber223/AudioApp
 def Run(self, args):
   """Run the list command."""
   project_id = properties.VALUES.core.project.Get(required=True)
   debugger = debug.Debugger(project_id)
   debuggee = debugger.FindDebuggee(args.target)
   snapshots = debuggee.ListBreakpoints(
       args.location, resource_ids=args.ids, include_all_users=args.all_users,
       include_inactive=(args.include_inactive != 0),
       restrict_to_type=debugger.SNAPSHOT_TYPE)
   # Filter any results more than include_inactive seconds old.
   # include_inactive may be None, which means we do not want to filter the
   # results.
   if args.include_inactive:
     cutoff_time = (times.Now(times.UTC) -
                    datetime.timedelta(seconds=args.include_inactive))
     snapshots = [s for s in snapshots if _ShouldInclude(s, cutoff_time)]
   return snapshots
Code example #12
 def _WaitForSSHKeysToPropagate(self, ssh_helper, remote, identity_file,
                                user, instance, options):
     """Waits for SSH keys to propagate in order to SSH to the instance."""
     ssh_helper.EnsureSSHKeyExists(
         self.client, user, instance,
         ssh_helper.GetProject(
             self.client,
             properties.VALUES.core.project.Get(required=True)),
         times.Now() + datetime.timedelta(seconds=300))
     ssh_poller = ssh.SSHPoller(remote=remote,
                                identity_file=identity_file,
                                options=options,
                                max_wait_ms=120 * 1000)
     try:
         ssh_poller.Poll(ssh_helper.env, force_connect=True)
     except retry.WaitException:
         raise ssh_utils.NetworkError()
Code example #13
def GenerateCertId():
    """Generate a certificate id with the date and two length 3 alphanum strings.

  E.G. YYYYMMDD-ABC-DEF.

  Returns:
    The generated certificate id string.
  """
    # Avoid name collisions in certificate id generation. Normal random.choice
    # seeds on system time, which might not be sufficiently random of a seed.
    # SystemRandom uses other sources from the OS to generate the random ID.
    sys_rng = random.SystemRandom()
    alphanum = string.ascii_uppercase + string.digits
    alphanum_rand1 = ''.join(sys_rng.choice(alphanum) for i in range(3))
    alphanum_rand2 = ''.join(sys_rng.choice(alphanum) for i in range(3))
    date_str = times.FormatDateTime(times.Now(), '%Y%m%d')
    return '{}-{}-{}'.format(date_str, alphanum_rand1, alphanum_rand2)
Code example #14
    def testListIncludeExclude(self):
        now = times.Now(times.UTC)
        self.StartObjectPatch(times, 'Now', return_value=now)
        create_time = now - datetime.timedelta(hours=1, minutes=1)
        logpoints = [
            self.messages.Breakpoint(
                id='dummy-id-{0}'.format(i),
                createTime=create_time.isoformat(),
                action=self.messages.Breakpoint.ActionValueValuesEnum.LOG,
                logLevel=self.messages.Breakpoint.LogLevelValueValuesEnum.INFO,
                isFinalState=True,
                logMessageFormat='message',
                location=self.messages.SourceLocation(path='myfile', line=i))
            for i in range(0, 10)
        ]
        exclude_indices = [0, 2, 4, 5, 7, 9]
        include_indices = [1, 3, 6, 8]
        for e in exclude_indices:
            # Set to a time before the default cutoff (so it will be excluded).
            logpoints[e].finalTime = (
                now - datetime.timedelta(seconds=301)).isoformat()
        for i in include_indices:
            # Set to a time after the default cutoff (so it will be included).
            logpoints[i].finalTime = (
                now - datetime.timedelta(seconds=299)).isoformat()
        included_ids = [logpoints[i].id for i in include_indices]
        excluded_ids = [logpoints[e].id for e in exclude_indices]
        logpoints = [self.debuggee.AddTargetInfo(lp) for lp in logpoints]

        list_mock = self.StartObjectPatch(debug.Debuggee,
                                          'ListBreakpoints',
                                          return_value=logpoints)
        self.RunDebug(['logpoints', 'list'])
        list_mock.assert_called_once_with(
            None,
            resource_ids=[],
            include_all_users=True,
            include_inactive=True,
            restrict_to_type=debug.Debugger.LOGPOINT_TYPE)

        for i in included_ids:
            self.AssertOutputContains(i)
        for e in excluded_ids:
            self.AssertOutputNotContains(e)
Code example #15
File: triggers.py  Project: saranraju90/multik8s
def TriggerActiveTime(event_type, update_time):
  """Computes the time by which the trigger will become active.

  Args:
    event_type: str, the trigger's event type.
    update_time: str, the time when the trigger was last modified.

  Returns:
    The active time as a string, or None if the trigger is already active.
  """
  if not types.IsAuditLogType(event_type):
    # The delay only applies to Audit Log triggers.
    return None
  update_dt = times.ParseDateTime(update_time)
  delay = iso_duration.Duration(minutes=MAX_ACTIVE_DELAY_MINUTES)
  active_dt = times.GetDateTimePlusDuration(update_dt, delay)
  if times.Now() >= active_dt:
    return None
  return times.FormatDateTime(active_dt, fmt='%H:%M:%S', tzinfo=times.LOCAL)
Code example #16
def _IsTTLSafe(ttl, obj):
    """Determines whether a GCS object is close to end-of-life.

  In order to reduce false negative rate (objects that are close to deletion but
  aren't marked as such) the returned filter is forward-adjusted with
  _TTL_MARGIN.

  Args:
    ttl: datetime.timedelta, TTL of objects, or None if no TTL.
    obj: storage object to check.

  Returns:
    True if the object is safe or False if it is approaching end of life.
  """
    if ttl is None:
        return True
    now = times.Now(times.UTC)
    delta = ttl - _TTL_MARGIN
    return (now - obj.timeCreated) <= delta
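Read as plain arithmetic: the object is unsafe once its age exceeds ttl - _TTL_MARGIN. A stand-alone sketch with an assumed margin value:

from datetime import datetime, timedelta, timezone

_TTL_MARGIN = timedelta(hours=1)  # assumed margin, for illustration only

def is_ttl_safe(ttl, time_created):
    """True while the object's age is at least _TTL_MARGIN short of its TTL."""
    if ttl is None:
        return True
    age = datetime.now(timezone.utc) - time_created  # time_created must be aware
    return age <= ttl - _TTL_MARGIN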
Code example #17
def CreateCloudEvent(event_id, event_type, event_source, event_data,
                     event_attributes):
  """Transform args to a valid cloud event.

  Args:
    event_id: The id of a published event.
    event_type: The event type of a published event.
    event_source: The event source of a published event.
    event_data: The event data of a published event.
    event_attributes: The event attributes of a published event. It can be
      repeated to add more attributes.

  Returns:
    valid CloudEvent.

  """
  cloud_event = {
      "@type": "type.googleapis.com/io.cloudevents.v1.CloudEvent",
      "id": event_id,
      "source": event_source,
      "specVersion": "1.0",
      "type": event_type,
      "attributes": {
          "time": {
              "ceTimestamp":
                  times.FormatDateTime(times.Now())
          },
          "datacontenttype": {
              "ceString": "application/json"
          },
      },
      "textData": event_data
  }

  # There can be zero or more event attributes, so they are folded into the
  # attributes dictionary as ceString values.
  if event_attributes is not None:
    for key, value in event_attributes.items():
      cloud_event["attributes"][key] = {"ceString": value}

  return cloud_event
Code example #18
def _TokenExpiresWithinWindow(expiry_window,
                              token_expiry_time,
                              max_window_seconds=3600):
    """Determines if token_expiry_time is within expiry_window_duration.

  Calculates the amount of time between utcnow() and token_expiry_time and
  returns True if that amount is less than the provided duration window. All
  calculations are done in seconds for consistency.


  Args:
    expiry_window: string, Duration representing the amount of time between
      now and token_expiry_time to compare against.
    token_expiry_time: datetime, The time when token expires.
    max_window_seconds: int, Maximum size of expiry window, in seconds.

  Raises:
    ValueError: If expiry_window is invalid or can not be parsed.

  Returns:
    True if the token is expired or will expire within the provided window,
    False otherwise.
  """
    try:
        min_expiry = times.ParseDuration(expiry_window, default_suffix='s')
        if min_expiry.total_seconds > max_window_seconds:
            raise ValueError('Invalid expiry window duration [{}]: '
                             'Must be between 0s and 1h'.format(expiry_window))
    except times.Error as e:
        message = six.text_type(e).rstrip('.')
        raise ValueError('Error Parsing expiry window duration '
                         '[{}]: {}'.format(expiry_window, message))

    token_expiry_time = times.LocalizeDateTime(token_expiry_time,
                                               tzinfo=dateutil.tz.tzutc())
    window_end = times.GetDateTimePlusDuration(
        times.Now(tzinfo=dateutil.tz.tzutc()), min_expiry)

    return token_expiry_time <= window_end
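Once the duration string has been parsed, the core test is simply whether the token expires before now + window. A minimal sketch that takes the window as a number of seconds (hypothetical signature, not the gcloud helper):

from datetime import datetime, timedelta, timezone

def token_expires_within_window(window_seconds, token_expiry_time,
                                max_window_seconds=3600):
    """True if token_expiry_time falls before now + window_seconds (UTC)."""
    if not 0 <= window_seconds <= max_window_seconds:
        raise ValueError(
            'Invalid expiry window duration [{}]: Must be between 0s and 1h'
            .format(window_seconds))
    if token_expiry_time.tzinfo is None:
        token_expiry_time = token_expiry_time.replace(tzinfo=timezone.utc)
    window_end = datetime.now(timezone.utc) + timedelta(seconds=window_seconds)
    return token_expiry_time <= window_end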
Code example #19
    def testListIncludesExpiredUnlimited(self):
        now = times.Now(times.UTC)
        self.StartObjectPatch(times, 'Now', return_value=now)
        create_time = now - datetime.timedelta(days=2)
        logpoints = [
            self.messages.Breakpoint(
                id='dummy-id-{0}'.format(i),
                createTime=create_time.isoformat(),
                action=self.messages.Breakpoint.ActionValueValuesEnum.LOG,
                logLevel=self.messages.Breakpoint.LogLevelValueValuesEnum.INFO,
                isFinalState=True,
                logMessageFormat='message',
                location=self.messages.SourceLocation(path='myfile', line=i))
            for i in range(0, 10)
        ]
        # Set logpoint 0 to a final time well in the past.
        logpoints[0].finalTime = (
            now - datetime.timedelta(days=1, minutes=1)).isoformat()
        logpoints = [self.debuggee.AddTargetInfo(lp) for lp in logpoints]

        list_mock = self.StartObjectPatch(debug.Debuggee,
                                          'ListBreakpoints',
                                          return_value=logpoints)
        self.RunDebug(['logpoints', 'list', '--include-inactive=unlimited'])
        list_mock.assert_called_once_with(
            None,
            resource_ids=[],
            include_all_users=True,
            include_inactive=True,
            restrict_to_type=debug.Debugger.LOGPOINT_TYPE)

        for l in logpoints:
            self.AssertOutputContains(l.id)
        self.AssertOutputContains(
            'USER_EMAIL LOCATION CONDITION LOG_LEVEL LOG_MESSAGE_FORMAT ID STATUS',
            normalize_space=True)
Code example #20
File: rolling_action.py  Project: barber223/AudioApp
def CreateRequest(args,
                  cleared_fields,
                  client,
                  resources,
                  minimal_action,
                  max_surge=None):
    """Create request helper for compute instance-groups managed rolling-action.

  Args:
    args: argparse namespace
    cleared_fields: Fields which are left cleared, but should be sent in the request
    client: The compute client
    resources: The compute resources
    minimal_action: MinimalActionValueValuesEnum value
    max_surge: InstanceGroupManagerUpdatePolicy.maxSurge value

  Returns:
    ComputeInstanceGroupManagersPatchRequest or
    ComputeRegionInstanceGroupManagersPatchRequest instance
  """
    resource_arg = instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG
    default_scope = compute_scope.ScopeEnum.ZONE
    scope_lister = flags.GetDefaultScopeLister(client)
    igm_ref = resource_arg.ResolveAsResource(args,
                                             resources,
                                             default_scope=default_scope,
                                             scope_lister=scope_lister)

    update_policy_type = (client.messages.InstanceGroupManagerUpdatePolicy.
                          TypeValueValuesEnum.PROACTIVE)
    max_unavailable = update_instances_utils.ParseFixedOrPercent(
        '--max-unavailable', 'max-unavailable', args.max_unavailable,
        client.messages)

    igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
        igm_ref, client)

    versions = (igm_info.versions or [
        client.messages.InstanceGroupManagerVersion(
            instanceTemplate=igm_info.instanceTemplate)
    ])
    current_time_str = str(times.Now(times.UTC))
    for i, version in enumerate(versions):
        version.name = '%d/%s' % (i, current_time_str)

    update_policy = client.messages.InstanceGroupManagerUpdatePolicy(
        maxSurge=max_surge,
        maxUnavailable=max_unavailable,
        minReadySec=args.min_ready,
        minimalAction=minimal_action,
        type=update_policy_type)
    igm_resource = client.messages.InstanceGroupManager(
        instanceTemplate=None, updatePolicy=update_policy, versions=versions)
    if igm_ref.Collection() == 'compute.instanceGroupManagers':
        service = client.apitools_client.instanceGroupManagers
        request = client.messages.ComputeInstanceGroupManagersPatchRequest(
            instanceGroupManager=igm_ref.Name(),
            instanceGroupManagerResource=igm_resource,
            project=igm_ref.project,
            zone=igm_ref.zone)
    elif igm_ref.Collection() == 'compute.regionInstanceGroupManagers':
        service = client.apitools_client.regionInstanceGroupManagers
        request = client.messages.ComputeRegionInstanceGroupManagersPatchRequest(
            instanceGroupManager=igm_ref.Name(),
            instanceGroupManagerResource=igm_resource,
            project=igm_ref.project,
            region=igm_ref.region)
    # Due to 'Patch' semantics, we have to clear either 'fixed' or 'percent'.
    # Otherwise, we'll get an error that both 'fixed' and 'percent' are set.
    if max_surge is not None:
        cleared_fields.append('updatePolicy.maxSurge.fixed' if max_surge.fixed
                              is None else 'updatePolicy.maxSurge.percent')
    if max_unavailable is not None:
        cleared_fields.append(
            'updatePolicy.maxUnavailable.fixed' if max_unavailable.
            fixed is None else 'updatePolicy.maxUnavailable.percent')
    return (service, 'Patch', request)
Code example #21
def TransformDuration(r,
                      start='',
                      end='',
                      parts=3,
                      precision=3,
                      calendar=True,
                      unit=1,
                      undefined=''):
    """Formats the resource as an ISO 8601 duration string.

  The [ISO 8601 Duration](https://en.wikipedia.org/wiki/ISO_8601#Durations)
  format is: "[-]P[nY][nM][nD][T[nH][nM][n[.m]S]]". The 0 duration is "P0".
  Otherwise at least one part will always be displayed. Negative durations are
  prefixed by "-". "T" disambiguates months "P2M" to the left of "T" and minutes
  "PT5M" to the right.

  If the resource is a datetime then the duration of `resource - current_time`
  is returned.

  Args:
    r: A JSON-serializable object.
    start: The name of a start time attribute in the resource. The duration of
      the `end - start` time attributes in resource is returned. If `end` is
      not specified then the current time is used.
    end: The name of an end time attribute in the resource. Defaults to
      the current time if omitted. Ignored if `start` is not specified.
    parts: Format at most this many duration parts starting with largest
      non-zero part.
    precision: Format the last duration part with precision digits after the
      decimal point. Trailing "0" and "." are always stripped.
    calendar: Allow time units larger than hours in formatted durations if true.
      Durations specifying hours or smaller units are exact across daylight
      savings time boundaries. On by default. Use calendar=false to disable.
      For example, if `calendar=true` then at the daylight savings boundary
      2016-03-13T01:00:00 + P1D => 2016-03-14T01:00:00 but 2016-03-13T01:00:00 +
      PT24H => 2016-03-14T03:00:00. Similarly, a +P1Y duration will be inexact
      but "calendar correct", yielding the same month and day number next year,
      even in leap years.
    unit: Divide the resource numeric value by _unit_ to yield seconds.
    undefined: Returns this value if the resource is not a valid timestamp.

  Returns:
    The ISO 8601 duration string for r or undefined if r is not a duration.
  """
    try:
        parts = int(parts)
        precision = int(precision)
    except ValueError:
        return undefined
    calendar = GetBooleanArgValue(calendar)

    if start:
        # Duration of ((end or Now()) - start).

        # Get the datetime of both.
        try:
            start_datetime = times.ParseDateTime(_GetKeyValue(r, start))
            end_value = _GetKeyValue(r, end) if end else None
            if end_value:
                end_datetime = times.ParseDateTime(end_value)
            else:
                end_datetime = times.Now(tzinfo=start_datetime.tzinfo)
        except (AttributeError, ImportError, TypeError, ValueError):
            return undefined

        # Finally format the duration of the delta.
        delta = end_datetime - start_datetime
        return times.GetDurationFromTimeDelta(
            delta=delta, calendar=calendar).Format(parts=parts,
                                                   precision=precision)

    # Check if the resource is a float duration.
    try:
        duration = times.ParseDuration('PT{0}S'.format(float(r) / unit),
                                       calendar=calendar)
        return duration.Format(parts=parts, precision=precision)
    except (TypeError, ValueError):
        pass

    # Check if the resource is an ISO 8601 duration.
    try:
        duration = times.ParseDuration(r)
        return duration.Format(parts=parts, precision=precision)
    except (AttributeError, TypeError, ValueError):
        pass

    # Check if the resource is a datetime.
    try:
        start_datetime = times.ParseDateTime(r)
    except (AttributeError, ImportError, TypeError, ValueError):
        return undefined

    # Format the duration of (now - r).
    end_datetime = times.Now(tzinfo=start_datetime.tzinfo)
    delta = end_datetime - start_datetime
    return times.GetDurationFromTimeDelta(
        delta=delta, calendar=calendar).Format(parts=parts,
                                               precision=precision)
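As an illustration of the final fallback (the duration between now and a datetime-valued resource), a simplified stand-alone sketch that emits only time components and ignores the parts, precision, and calendar options:

from datetime import datetime, timezone

def duration_since(timestamp):
    """Returns a simplified ISO 8601 duration (PT..H..M..S) for now - timestamp."""
    start = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
    delta = datetime.now(timezone.utc) - start
    total = int(delta.total_seconds())
    sign = '-' if total < 0 else ''
    hours, rem = divmod(abs(total), 3600)
    minutes, seconds = divmod(rem, 60)
    return '{}PT{}H{}M{}S'.format(sign, hours, minutes, seconds)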
Code example #22
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

        if args.gcs_source_staging_dir is None:
            args.gcs_source_staging_dir = 'gs://{project}_cloudbuild/source'.format(
                project=properties.VALUES.core.project.Get(), )
        if args.gcs_log_dir is None:
            args.gcs_log_dir = 'gs://{project}_cloudbuild/logs'.format(
                project=properties.VALUES.core.project.Get(), )

        client = core_apis.GetClientInstance('cloudbuild', 'v1')
        messages = core_apis.GetMessagesModule('cloudbuild', 'v1')
        registry = self.context['registry']

        gcs_client = storage_api.StorageClient()

        # First, create the build request.
        build_timeout = properties.VALUES.container.build_timeout.Get()
        if build_timeout is not None:
            timeout_str = build_timeout + 's'
        else:
            timeout_str = None

        if args.tag:
            if 'gcr.io/' not in args.tag:
                raise c_exceptions.InvalidArgumentException(
                    '--tag',
                    'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.'
                )
            build_config = messages.Build(
                images=[args.tag],
                steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        args=['build', '--no-cache', '-t', args.tag, '.'],
                    ),
                ],
                timeout=timeout_str,
            )
        elif args.config:
            build_config = config.LoadCloudbuildConfig(args.config, messages)

        if build_config.timeout is None:
            build_config.timeout = timeout_str

        suffix = '.tgz'
        if args.source.startswith('gs://') or os.path.isfile(args.source):
            _, suffix = os.path.splitext(args.source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}_{tag_ish}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            tag_ish='_'.join(build_config.images or 'null').replace('/', '_'),
            suffix=suffix,
        )
        gcs_source_staging_dir = registry.Parse(args.gcs_source_staging_dir,
                                                collection='storage.objects')
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)
        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object

        gcs_source_staging = registry.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if args.source.startswith('gs://'):
            gcs_source = registry.Parse(args.source,
                                        collection='storage.objects')
            staged_source_obj = gcs_client.Copy(gcs_source, gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(args.source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=args.source))
            if os.path.isdir(args.source):
                source_snapshot = snapshot.Snapshot(args.source)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.write(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.\n'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(args.source):
                unused_root, ext = os.path.splitext(args.source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {exts}'.format(
                            src=args.source,
                            exts=', '.join(_ALLOWED_SOURCE_EXT)))
                log.status.write('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}]\n'.format(
                                     src=args.source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    storage_util.BucketReference.FromBucketUrl(
                        gcs_source_staging.bucket), args.source,
                    gcs_source_staging.object)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))

        gcs_log_dir = registry.Parse(args.gcs_log_dir,
                                     collection='storage.objects')

        if gcs_log_dir.bucket != gcs_source_staging.bucket:
            # Create the logs bucket if it does not yet exist.
            gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
        build_config.logsBucket = 'gs://' + gcs_log_dir.bucket + '/' + gcs_log_dir.object

        log.debug('submitting build: ' + repr(build_config))

        # Start the build.
        op = client.projects_builds.Create(
            messages.CloudbuildProjectsBuildsCreateRequest(
                build=build_config,
                projectId=properties.VALUES.core.project.Get()))
        json = encoding.MessageToJson(op.metadata)
        build = encoding.JsonToMessage(messages.BuildOperationMetadata,
                                       json).build

        build_ref = registry.Create(collection='cloudbuild.projects.builds',
                                    projectId=build.projectId,
                                    id=build.id)

        log.CreatedResource(build_ref)
        if build.logUrl:
            log.status.write(
                'Logs are permanently available at [{log_url}]\n'.format(
                    log_url=build.logUrl))
        else:
            log.status.write('Logs are available in the Cloud Console.\n')

        # If the command is run --async, we just print out a reference to the build.
        if args.async:
            return build

        # Otherwise, logs are streamed from GCS.
        build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

        if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
            raise FailedBuildException(build.status)

        return build
Code example #23
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get(required=True)
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.builds.timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag is not None:
      if (properties.VALUES.builds.check_tag.GetBool() and
          'gcr.io/' not in args.tag):
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      if properties.VALUES.builds.use_kaniko.GetBool():
        if args.no_cache:
          ttl = '0h'
        else:
          ttl = '{}h'.format(properties.VALUES.builds.kaniko_cache_ttl.Get())
        build_config = messages.Build(
            steps=[
                messages.BuildStep(
                    name=properties.VALUES.builds.kaniko_image.Get(),
                    args=[
                        '--destination', args.tag, '--cache', 'true',
                        '--cache-ttl', ttl
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
      else:
        if args.no_cache:
          raise c_exceptions.InvalidArgumentException(
              'no-cache',
              'Cannot specify --no-cache if builds/use_kaniko property is '
              'False')
        build_config = messages.Build(
            images=[args.tag],
            steps=[
                messages.BuildStep(
                    name='gcr.io/cloud-builders/docker',
                    args=[
                        'build', '--network', 'cloudbuild', '--no-cache', '-t',
                        args.tag, '.'
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
    elif args.config is not None:
      if args.no_cache:
        raise c_exceptions.ConflictingArgumentsException(
            '--config', '--no-cache')
      if not args.config:
        raise c_exceptions.InvalidArgumentException(
            '--config', 'Config file path must not be empty.')
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)
    else:
      raise c_exceptions.OneOfArgumentsRequiredException(
          ['--tag', '--config'],
          'Requires either a docker tag or a config file.')

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    # --no-source overrides the default --source.
    if not args.IsSpecified('source') and args.no_source:
      args.source = None

    gcs_source_staging = None
    if args.source:
      suffix = '.tgz'
      if args.source.startswith('gs://') or os.path.isfile(args.source):
        _, suffix = os.path.splitext(args.source)

      # Next, stage the source to Cloud Storage.
      staged_object = '{stamp}-{uuid}{suffix}'.format(
          stamp=times.GetTimeStampFromDateTime(times.Now()),
          uuid=uuid.uuid4().hex,
          suffix=suffix,
      )
      gcs_source_staging_dir = resources.REGISTRY.Parse(
          args.gcs_source_staging_dir, collection='storage.objects')

      # We create the bucket (if it does not exist) first. If we do an existence
      # check and then create the bucket ourselves, it would be possible for an
      # attacker to get lucky and beat us to creating the bucket. Block on this
      # creation to avoid this race condition.
      gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

      # If no bucket is specified (for the source `default_gcs_source`), check
      # that the default bucket is also owned by the project (b/33046325).
      if default_gcs_source:
        # This request returns only the buckets owned by the project.
        bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
            project=project, prefix=default_bucket_name)
        bucket_list = gcs_client.client.buckets.List(bucket_list_req)
        found_bucket = False
        for bucket in bucket_list.items:
          if bucket.id == default_bucket_name:
            found_bucket = True
            break
        if not found_bucket:
          if default_gcs_source:
            raise c_exceptions.RequiredArgumentException(
                'gcs_source_staging_dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs_source_staging_dir.'.format(default_bucket_name))

      if gcs_source_staging_dir.object:
        staged_object = gcs_source_staging_dir.object + '/' + staged_object
      gcs_source_staging = resources.REGISTRY.Create(
          collection='storage.objects',
          bucket=gcs_source_staging_dir.bucket,
          object=staged_object)

      if args.source.startswith('gs://'):
        gcs_source = resources.REGISTRY.Parse(
            args.source, collection='storage.objects')
        staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      else:
        if not os.path.exists(args.source):
          raise c_exceptions.BadFileException(
              'could not find source [{src}]'.format(src=args.source))
        if os.path.isdir(args.source):
          source_snapshot = snapshot.Snapshot(args.source,
                                              ignore_file=args.ignore_file)
          size_str = resource_transform.TransformSize(
              source_snapshot.uncompressed_size)
          log.status.Print(
              'Creating temporary tarball archive of {num_files} file(s)'
              ' totalling {size} before compression.'.format(
                  num_files=len(source_snapshot.files), size=size_str))
          staged_source_obj = source_snapshot.CopyTarballToGCS(
              gcs_client, gcs_source_staging, ignore_file=args.ignore_file)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
        elif os.path.isfile(args.source):
          unused_root, ext = os.path.splitext(args.source)
          if ext not in _ALLOWED_SOURCE_EXT:
            raise c_exceptions.BadFileException(
                'Local file [{src}] is none of {exts}'.format(
                    src=args.source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
          log.status.Print('Uploading local file [{src}] to '
                           '[gs://{bucket}/{object}].'.format(
                               src=args.source,
                               bucket=gcs_source_staging.bucket,
                               object=gcs_source_staging.object,
                           ))
          staged_source_obj = gcs_client.CopyFileToGCS(args.source,
                                                       gcs_source_staging)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
    else:
      # No source
      if not args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source', 'To omit source, use the --no-source flag.')

    if args.gcs_log_dir:
      gcs_log_dir = resources.REGISTRY.Parse(
          args.gcs_log_dir, collection='storage.objects')

      build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                 gcs_log_dir.object)

    # Machine type.
    if args.machine_type is not None:
      machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
          args.machine_type)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.machineType = machine_type

    # Disk size.
    if args.disk_size is not None:
      disk_size = compute_utils.BytesToGb(args.disk_size)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.diskSizeGb = int(disk_size)

    log.debug('submitting build: ' + repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config, projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print(
          'Logs are available at [{log_url}].'.format(log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the build.
    if args.async:
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
Code example #24
def CreateRequest(args,
                  client,
                  resources,
                  minimal_action,
                  max_surge=None):
  """Create request helper for compute instance-groups managed rolling-action.

  Args:
    args: argparse namespace
    client: The compute client
    resources: The compute resources
    minimal_action: MinimalActionValueValuesEnum value
    max_surge: InstanceGroupManagerUpdatePolicy.maxSurge value

  Returns:
    ComputeInstanceGroupManagersPatchRequest or
    ComputeRegionInstanceGroupManagersPatchRequest instance

  Raises:
    ValueError: if instance group manager collection path is unknown
  """
  resource_arg = instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG
  default_scope = compute_scope.ScopeEnum.ZONE
  scope_lister = flags.GetDefaultScopeLister(client)
  igm_ref = resource_arg.ResolveAsResource(
      args, resources, default_scope=default_scope, scope_lister=scope_lister)

  if igm_ref.Collection() not in [
      'compute.instanceGroupManagers', 'compute.regionInstanceGroupManagers'
  ]:
    raise ValueError('Unknown reference type {0}'.format(igm_ref.Collection()))

  update_policy_type = (client.messages.InstanceGroupManagerUpdatePolicy.
                        TypeValueValuesEnum.PROACTIVE)
  max_unavailable = update_instances_utils.ParseFixedOrPercent(
      '--max-unavailable', 'max-unavailable', args.max_unavailable,
      client.messages)

  igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
      igm_ref, client)

  versions = (igm_info.versions or [
      client.messages.InstanceGroupManagerVersion(
          instanceTemplate=igm_info.instanceTemplate)
  ])
  current_time_str = str(times.Now(times.UTC))
  for i, version in enumerate(versions):
    version.name = '%d/%s' % (i, current_time_str)

  update_policy = client.messages.InstanceGroupManagerUpdatePolicy(
      maxSurge=max_surge,
      maxUnavailable=max_unavailable,
      minimalAction=minimal_action,
      type=update_policy_type)
  # min_ready is available in alpha and beta APIs only
  if hasattr(args, 'min_ready'):
    update_policy.minReadySec = args.min_ready
  # replacement_method is available in alpha API only
  if hasattr(args, 'replacement_method'):
    replacement_method = update_instances_utils.ParseReplacementMethod(
        args.replacement_method, client.messages)
    update_policy.replacementMethod = replacement_method

  igm_resource = client.messages.InstanceGroupManager(
      instanceTemplate=None, updatePolicy=update_policy, versions=versions)
  if igm_ref.Collection() == 'compute.instanceGroupManagers':
    service = client.apitools_client.instanceGroupManagers
    request = client.messages.ComputeInstanceGroupManagersPatchRequest(
        instanceGroupManager=igm_ref.Name(),
        instanceGroupManagerResource=igm_resource,
        project=igm_ref.project,
        zone=igm_ref.zone)
  else:
    service = client.apitools_client.regionInstanceGroupManagers
    request = client.messages.ComputeRegionInstanceGroupManagersPatchRequest(
        instanceGroupManager=igm_ref.Name(),
        instanceGroupManagerResource=igm_resource,
        project=igm_ref.project,
        region=igm_ref.region)
  return (service, 'Patch', request)
Code example #25
    def CreateRequest(self, args, cleared_fields):
        resource_arg = instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG
        default_scope = compute_scope.ScopeEnum.ZONE
        scope_lister = flags.GetDefaultScopeLister(self.compute_client,
                                                   self.project)
        igm_ref = resource_arg.ResolveAsResource(args,
                                                 self.resources,
                                                 default_scope=default_scope,
                                                 scope_lister=scope_lister)

        update_instances_utils.ValidateUpdateInstancesArgs(args)
        update_policy_type = update_instances_utils.ParseUpdatePolicyType(
            '--type', args.type, self.messages)
        max_surge = update_instances_utils.ParseFixedOrPercent(
            '--max-surge', 'max-surge', args.max_surge, self.messages)
        max_unavailable = update_instances_utils.ParseFixedOrPercent(
            '--max-unavailable', 'max-unavailable', args.max_unavailable,
            self.messages)

        igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
            igm_ref, self.compute_client)
        if args.action == 'replace':
            versions = []
            if args.version_original:
                versions.append(
                    update_instances_utils.ParseVersion(
                        '--version-original', args.version_original,
                        self.resources, self.messages))
            versions.append(
                update_instances_utils.ParseVersion('--version-new',
                                                    args.version_new,
                                                    self.resources,
                                                    self.messages))
            managed_instance_groups_utils.ValidateVersions(
                igm_info, versions, args.force)

            # TODO(b/36056457): Decide what we should do when two versions have the
            # same instance template (this can happen with canary restart performed
            # using tags).
            igm_version_names = {
                version.instanceTemplate: version.name
                for version in igm_info.versions
            }
            for version in versions:
                version.name = igm_version_names.get(version.instanceTemplate)
                version.tag = version.name
            minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy.
                              MinimalActionValueValuesEnum.REPLACE)
        elif args.action == 'restart' and igm_info.versions is not None:
            versions = (igm_info.versions or [
                self.messages.InstanceGroupManagerVersion(
                    instanceTemplate=igm_info.instanceTemplate)
            ])
            current_time_str = str(times.Now(times.UTC))
            for i, version in enumerate(versions):
                version.name = '%d/%s' % (i, current_time_str)
                version.tag = version.name
            minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy.
                              MinimalActionValueValuesEnum.RESTART)
        else:
            raise exceptions.InvalidArgumentException('--action',
                                                      'unknown action type.')

        update_policy = self.messages.InstanceGroupManagerUpdatePolicy(
            maxSurge=max_surge,
            maxUnavailable=max_unavailable,
            minReadySec=args.min_ready,
            minimalAction=minimal_action,
            type=update_policy_type)
        igm_resource = self.messages.InstanceGroupManager(
            instanceTemplate=None,
            updatePolicy=update_policy,
            versions=versions)
        if hasattr(igm_ref, 'zone'):
            service = self.compute.instanceGroupManagers
            request = (self.messages.ComputeInstanceGroupManagersPatchRequest(
                instanceGroupManager=igm_ref.Name(),
                instanceGroupManagerResource=igm_resource,
                project=self.project,
                zone=igm_ref.zone))
        elif hasattr(igm_ref, 'region'):
            service = self.compute.regionInstanceGroupManagers
            request = (
                self.messages.ComputeRegionInstanceGroupManagersPatchRequest(
                    instanceGroupManager=igm_ref.Name(),
                    instanceGroupManagerResource=igm_resource,
                    project=self.project,
                    region=igm_ref.region))
        # Due to 'Patch' semantics, we have to clear either 'fixed' or 'percent'.
        # Otherwise, we'll get an error that both 'fixed' and 'percent' are set.
        if max_surge is not None:
            cleared_fields.append(
                'updatePolicy.maxSurge.fixed' if max_surge.fixed is None else
                'updatePolicy.maxSurge.percent')
        if max_unavailable is not None:
            cleared_fields.append(
                'updatePolicy.maxUnavailable.fixed' if max_unavailable.
                fixed is None else 'updatePolicy.maxUnavailable.percent')
        return (service, 'Patch', request)
Code example #26
    def CreateRequests(self, args):
        resource_arg = instance_groups_flags.ZONAL_INSTANCE_GROUP_MANAGER_ARG
        default_scope = flags.ScopeEnum.ZONE
        scope_lister = flags.GetDefaultScopeLister(self.compute_client,
                                                   self.project)
        igm_ref = resource_arg.ResolveAsResource(args,
                                                 self.resources,
                                                 default_scope=default_scope,
                                                 scope_lister=scope_lister)

        update_instances_utils.ValidateUpdateInstancesArgs(args)
        update_policy_type = update_instances_utils.ParseUpdatePolicyType(
            '--type', args.type, self.messages)
        max_surge = update_instances_utils.ParseFixedOrPercent(
            '--max-surge', 'max-surge', args.max_surge, self.messages)
        max_unavailable = update_instances_utils.ParseFixedOrPercent(
            '--max-unavailable', 'max-unavailable', args.max_unavailable,
            self.messages)

        igm_info = managed_instance_groups_utils.GetInstanceGroupManagerOrThrow(
            igm_ref, self.project, self.compute, self.http, self.batch_url)
        if args.action == 'replace':
            versions = []
            if args.version_original:
                versions.append(
                    update_instances_utils.ParseVersion(
                        '--version-original', args.version_original,
                        self.resources, self.messages))
            versions.append(
                update_instances_utils.ParseVersion('--version-new',
                                                    args.version_new,
                                                    self.resources,
                                                    self.messages))
            managed_instance_groups_utils.ValidateVersions(
                igm_info, versions, args.force)

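            # Carry over the tag each instance template already has on the IGM,
            # so versions that reuse an existing template keep their previous tag.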
            igm_tags = dict((version.instanceTemplate, version.tag)
                            for version in igm_info.versions)
            for version in versions:
                version.tag = igm_tags.get(version.instanceTemplate)
            minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy.
                              MinimalActionValueValuesEnum.REPLACE)
        elif args.action == 'restart' and igm_info.versions is not None:
            versions = (igm_info.versions or [
                self.messages.InstanceGroupManagerVersion(
                    instanceTemplate=igm_info.instanceTemplate)
            ])
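            # Tagging every version with the current timestamp marks all
            # instances as out of date, which is what forces the restart.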
            for version in versions:
                version.tag = str(times.Now(times.UTC))
            minimal_action = (self.messages.InstanceGroupManagerUpdatePolicy.
                              MinimalActionValueValuesEnum.RESTART)
        else:
            raise exceptions.InvalidArgumentException('--action',
                                                      'unknown action type.')

        update_policy = self.messages.InstanceGroupManagerUpdatePolicy(
            maxSurge=max_surge,
            maxUnavailable=max_unavailable,
            minReadySec=args.min_ready,
            minimalAction=minimal_action,
            type=update_policy_type,
        )
        service = self.compute.instanceGroupManagers
        request = (self.messages.ComputeInstanceGroupManagersPatchRequest(
            instanceGroupManager=igm_ref.Name(),
            instanceGroupManagerResource=(self.messages.InstanceGroupManager(
                instanceTemplate=None,
                updatePolicy=update_policy,
                versions=versions)),
            project=self.project,
            zone=igm_ref.zone))

        return [(service, self.method, request)]
Code example #27
File: submit_util.py  Project: PinTrees/novelhub
def _SetSource(build_config, messages, is_specified_source, no_source, source,
               gcs_source_staging_dir, ignore_file):
    """Set the source for the build config."""
    default_gcs_source = False
    default_bucket_name = None
    if gcs_source_staging_dir is None:
        default_gcs_source = True
        default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
        gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
    gcs_client = storage_api.StorageClient()

    # --no-source overrides the default --source.
    if not is_specified_source and no_source:
        source = None

    gcs_source_staging = None
    if source:
        suffix = '.tgz'
        if source.startswith('gs://') or os.path.isfile(source):
            _, suffix = os.path.splitext(source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}-{uuid}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            uuid=uuid.uuid4().hex,
            suffix=suffix,
        )
        gcs_source_staging_dir = resources.REGISTRY.Parse(
            gcs_source_staging_dir, collection='storage.objects')

        # We create the bucket (if it does not exist) first. If we do an existence
        # check and then create the bucket ourselves, it would be possible for an
        # attacker to get lucky and beat us to creating the bucket. Block on this
        # creation to avoid this race condition.
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

        # If no bucket is specified (for the source `default_gcs_source`), check
        # that the default bucket is also owned by the project (b/33046325).
        if default_gcs_source and not staging_bucket_util.BucketIsInProject(
                gcs_client, default_bucket_name):
            raise c_exceptions.RequiredArgumentException(
                'gcs-source-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-source-staging-dir.'.format(default_bucket_name))

        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object
        gcs_source_staging = resources.REGISTRY.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if source.startswith('gs://'):
            gcs_source = resources.REGISTRY.Parse(source,
                                                  collection='storage.objects')
            staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                   gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=source))
            if os.path.isdir(source):
                source_snapshot = snapshot.Snapshot(source,
                                                    ignore_file=ignore_file)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.Print(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging, ignore_file=ignore_file)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(source):
                unused_root, ext = os.path.splitext(source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of '.format(src=source) +
                        ', '.join(_ALLOWED_SOURCE_EXT))
                log.status.Print('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}].'.format(
                                     src=source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    source, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
    else:
        # No source
        if not no_source:
            raise c_exceptions.InvalidArgumentException(
                '--no-source', 'To omit source, use the --no-source flag.')

    return build_config
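A hedged usage sketch for _SetSource: GetMessagesModule comes from cloudbuild_util as used elsewhere in these examples, while the Build construction and argument values below are illustrative assumptions only.

messages = cloudbuild_util.GetMessagesModule()
build_config = messages.Build(timeout='600s')  # assumed starting config
build_config = _SetSource(
    build_config,
    messages,
    is_specified_source=True,
    no_source=False,
    source='gs://my-bucket/app-src.tgz',
    gcs_source_staging_dir=None,  # fall back to the default staging bucket
    ignore_file=None)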
Code example #28
    def _StageSource(self, source, gcs_staging_dir_bucket,
                     gcs_staging_dir_object):
        """Stages source onto the provided bucket and returns its reference.

    Args:
      source: Path to source repo as a directory on a local disk or a
        gzipped archive file (.tar.gz) in Google Cloud Storage.
      gcs_staging_dir_bucket: Bucket name of staging directory.
      gcs_staging_dir_object: Bucket object of staging directory.

    Returns:
      Reference to the staged source, which has bucket, name, and generation
        fields.
    """

        suffix = '.tgz'
        if source.startswith('gs://') or os.path.isfile(source):
            _, suffix = os.path.splitext(source)

        source_object = 'source/{stamp}-{uuid}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            uuid=uuid.uuid4().hex,
            suffix=suffix,
        )

        if gcs_staging_dir_object:
            source_object = gcs_staging_dir_object + '/' + source_object

        gcs_source_staging = resources.REGISTRY.Create(
            collection='storage.objects',
            bucket=gcs_staging_dir_bucket,
            object=source_object)

        gcs_client = storage_api.StorageClient()
        if source.startswith('gs://'):
            gcs_source = resources.REGISTRY.Parse(source,
                                                  collection='storage.objects')
            staged_source = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        else:
            if not os.path.exists(source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=source))
            elif os.path.isdir(source):
                source_snapshot = snapshot.Snapshot(source)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.Print(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging)
            elif os.path.isfile(source):
                unused_root, ext = os.path.splitext(source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of '.format(src=source) +
                        ', '.join(_ALLOWED_SOURCE_EXT))
                log.status.Print('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}].'.format(
                                     src=source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source = gcs_client.CopyFileToGCS(
                    source, gcs_source_staging)

        return staged_source
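A hedged sketch of consuming _StageSource's return value; the StorageSource message mirrors the fields used in the neighbouring examples, but this caller is an assumption rather than part of the original command.

staged = self._StageSource('./app', 'my-staging-bucket', 'source')
storage_source = messages.StorageSource(
    bucket=staged.bucket,
    object=staged.name,
    generation=staged.generation)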
Code example #29
File: submit.py  Project: krisztinagy/master_thesis
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      The completed build resource, which the calling framework prints.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get()
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    default_gcs_log_dir = False
    if args.gcs_log_dir is None:
      default_gcs_log_dir = True
      args.gcs_log_dir = 'gs://{}/logs'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.container.build_timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = str(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag:
      if 'gcr.io/' not in args.tag:
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      build_config = messages.Build(
          images=[args.tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=['build', '--no-cache', '-t', args.tag, '.'],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(args.substitutions,
                                                            messages)
      )
    elif args.config:
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    suffix = '.tgz'
    if args.source.startswith('gs://') or os.path.isfile(args.source):
      _, suffix = os.path.splitext(args.source)

    # Next, stage the source to Cloud Storage.
    staged_object = '{stamp}{suffix}'.format(
        stamp=times.GetTimeStampFromDateTime(times.Now()),
        suffix=suffix,
    )
    gcs_source_staging_dir = resources.REGISTRY.Parse(
        args.gcs_source_staging_dir, collection='storage.objects')

    # We first try to create the bucket, before doing all the checks, in order
    # to avoid a race condition. If we do the check first, an attacker could
    # be lucky enough to create the bucket after the check and before this
    # bucket creation.
    gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

    # If no bucket is specified (for the source `default_gcs_source` or for the
    # logs `default_gcs_log_dir`), check that the default bucket is also owned
    # by the project (b/33046325).
    if default_gcs_source or default_gcs_log_dir:
      # This request returns only the buckets owned by the project.
      bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
          project=project,
          prefix=default_bucket_name)
      bucket_list = gcs_client.client.buckets.List(bucket_list_req)
      found_bucket = False
      for bucket in bucket_list.items:
        if bucket.id == default_bucket_name:
          found_bucket = True
          break
      if not found_bucket:
        if default_gcs_source:
          raise c_exceptions.RequiredArgumentException(
              'gcs_source_staging_dir',
              'A bucket with name {} already exists and is owned by '
              'another project. Specify a bucket using '
              '--gcs_source_staging_dir.'.format(default_bucket_name))
        elif default_gcs_log_dir:
          raise c_exceptions.RequiredArgumentException(
              'gcs-log-dir',
              'A bucket with name {} already exists and is owned by '
              'another project. Specify a bucket to hold build logs '
              'using --gcs-log-dir.'.format(default_bucket_name))

    if gcs_source_staging_dir.object:
      staged_object = gcs_source_staging_dir.object + '/' + staged_object

    gcs_source_staging = resources.REGISTRY.Create(
        collection='storage.objects',
        bucket=gcs_source_staging_dir.bucket,
        object=staged_object)

    if args.source.startswith('gs://'):
      gcs_source = resources.REGISTRY.Parse(
          args.source, collection='storage.objects')
      staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
      build_config.source = messages.Source(
          storageSource=messages.StorageSource(
              bucket=staged_source_obj.bucket,
              object=staged_source_obj.name,
              generation=staged_source_obj.generation,
          ))
    else:
      if not os.path.exists(args.source):
        raise c_exceptions.BadFileException(
            'could not find source [{src}]'.format(src=args.source))
      if os.path.isdir(args.source):
        source_snapshot = snapshot.Snapshot(args.source)
        size_str = resource_transform.TransformSize(
            source_snapshot.uncompressed_size)
        log.status.Print(
            'Creating temporary tarball archive of {num_files} file(s)'
            ' totalling {size} before compression.'.format(
                num_files=len(source_snapshot.files),
                size=size_str))
        staged_source_obj = source_snapshot.CopyTarballToGCS(
            gcs_client, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      elif os.path.isfile(args.source):
        unused_root, ext = os.path.splitext(args.source)
        if ext not in _ALLOWED_SOURCE_EXT:
          raise c_exceptions.BadFileException(
              'Local file [{src}] is none of '.format(src=args.source) +
              ', '.join(_ALLOWED_SOURCE_EXT))
        log.status.Print(
            'Uploading local file [{src}] to '
            '[gs://{bucket}/{object}].'.format(
                src=args.source,
                bucket=gcs_source_staging.bucket,
                object=gcs_source_staging.object,
            ))
        staged_source_obj = gcs_client.CopyFileToGCS(
            storage_util.BucketReference.FromBucketUrl(
                gcs_source_staging.bucket),
            args.source, gcs_source_staging.object)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))

    gcs_log_dir = resources.REGISTRY.Parse(
        args.gcs_log_dir, collection='storage.objects')

    if gcs_log_dir.bucket != gcs_source_staging.bucket:
      # Create the logs bucket if it does not yet exist.
      gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
    build_config.logsBucket = 'gs://'+gcs_log_dir.bucket+'/'+gcs_log_dir.object

    log.debug('submitting build: '+repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config,
            projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print('Logs are available at [{log_url}].'.format(
          log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the build.
    # 'async' is a reserved keyword in Python 3, so read the flag via getattr.
    if getattr(args, 'async'):
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
Code example #30
def GetCurrentTime():
    """Returns the current UTC datetime."""
    return times.Now(tzinfo=times.UTC)
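A hedged usage sketch: formatting GetCurrentTime's UTC result as an RFC 3339 string with the same times module.

now = GetCurrentTime()
stamp = times.FormatDateTime(now, '%Y-%m-%dT%H:%M:%S%Ez', tzinfo=times.UTC)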