Example #1
    def Run(self, args):
        """Issues an InstanceTemplates.Insert request.

    Args:
      args: the argparse arguments that this command was invoked with.

    Returns:
      an InstanceTemplate message object
    """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        instances_flags.ValidateDockerArgs(args)
        instances_flags.ValidateDiskCommonFlags(args)
        instances_flags.ValidateLocalSsdFlags(args)
        instances_flags.ValidateServiceAccountAndScopeArgs(args)
        instances_flags.ValidateNetworkTierArgs(args,
                                                support_network_tier=True)
        if instance_utils.UseExistingBootDisk(args.disk or []):
            raise exceptions.InvalidArgumentException(
                '--disk', 'Boot disk specified for containerized VM.')

        boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
        utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

        instance_template_ref = (
            CreateFromContainer.InstanceTemplateArg.ResolveAsResource(
                args, holder.resources))

        user_metadata = metadata_utils.ConstructMetadataMessage(
            client.messages,
            metadata=args.metadata,
            metadata_from_file=args.metadata_from_file)
        containers_utils.ValidateUserMetadata(user_metadata)

        network_interface = instance_template_utils.CreateNetworkInterfaceMessage(
            resources=holder.resources,
            scope_lister=flags.GetDefaultScopeLister(client),
            messages=client.messages,
            network=args.network,
            region=args.region,
            subnet=args.subnet,
            address=(instance_template_utils.EPHEMERAL_ADDRESS
                     if not args.no_address and not args.address else
                     args.address),
            network_tier=args.network_tier)

        scheduling = instance_utils.CreateSchedulingMessage(
            messages=client.messages,
            maintenance_policy=args.maintenance_policy,
            preemptible=args.preemptible,
            restart_on_failure=args.restart_on_failure)

        if args.no_service_account:
            service_account = None
        else:
            service_account = args.service_account
        service_accounts = instance_utils.CreateServiceAccountMessages(
            messages=client.messages,
            scopes=[] if args.no_scopes else args.scopes,
            service_account=service_account)

        image_uri = containers_utils.ExpandCosImageFlag(client)

        machine_type = instance_utils.InterpretMachineType(
            machine_type=args.machine_type,
            custom_cpu=args.custom_cpu,
            custom_memory=args.custom_memory,
            ext=getattr(args, 'custom_extensions', None))

        metadata = containers_utils.CreateMetadataMessage(
            client.messages, args.run_as_privileged, args.container_manifest,
            args.docker_image, args.port_mappings, args.run_command,
            user_metadata, instance_template_ref.Name())

        request = client.messages.ComputeInstanceTemplatesInsertRequest(
            instanceTemplate=client.messages.InstanceTemplate(
                properties=client.messages.InstanceProperties(
                    machineType=machine_type,
                    disks=self._CreateDiskMessages(
                        holder, args, boot_disk_size_gb, image_uri,
                        instance_template_ref.project),
                    canIpForward=args.can_ip_forward,
                    metadata=metadata,
                    minCpuPlatform=args.min_cpu_platform,
                    networkInterfaces=[network_interface],
                    serviceAccounts=service_accounts,
                    scheduling=scheduling,
                    tags=containers_utils.CreateTagsMessage(
                        client.messages, args.tags),
                ),
                description=args.description,
                name=instance_template_ref.Name(),
            ),
            project=instance_template_ref.project)

        return client.MakeRequests([(client.apitools_client.instanceTemplates,
                                     'Insert', request)])
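
Most of the Run methods collected here follow the same skeleton: build a ComputeApiHolder for the command's release track, validate and resolve the arguments into a resource reference, assemble an apitools request message, and hand it to client.MakeRequests. The fragment below is only a minimal sketch of that skeleton distilled from the examples in this listing; the ExampleDescribe class and the args.name/args.project/args.zone flags are hypothetical placeholders, and it assumes the googlecloudsdk helpers the surrounding commands import.

# Minimal sketch of the Run() skeleton shared by the examples in this
# listing. The command class and flag names are hypothetical placeholders.
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base


class ExampleDescribe(base.DescribeCommand):
    """Hypothetical describe command following the common pattern."""

    def Run(self, args):
        # 1. Build the API holder for this command's release track.
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        # 2. Resolve the arguments into a fully qualified resource reference
        #    (the real commands do this through a registered *_ARG object).
        instance_ref = holder.resources.Parse(
            args.name,
            params={'project': args.project, 'zone': args.zone},
            collection='compute.instances')

        # 3. Build the request message and issue it; MakeRequests handles
        #    batching, error reporting, and waiting on operations.
        request = client.messages.ComputeInstancesGetRequest(
            instance=instance_ref.Name(),
            project=instance_ref.project,
            zone=instance_ref.zone)
        return client.MakeRequests(
            [(client.apitools_client.instances, 'Get', request)])
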
Example #2
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client
        start = time_util.CurrentTimeSec()

        # Set up Encryption utilities.
        openssl_executable = files.FindExecutableOnPath('openssl')
        if windows_encryption_utils:
            crypt = windows_encryption_utils.WinCrypt()
        elif openssl_executable:
            crypt = openssl_encryption_utils.OpensslCrypt(openssl_executable)
        else:
            raise utils.MissingDependencyError(
                'Your platform does not support OpenSSL.')

        # Get Authenticated email address and default username.
        email = gaia.GetAuthenticatedGaiaEmail(client.apitools_client.http)
        if args.user:
            user = args.user
        else:
            user = gaia.MapGaiaEmailToDefaultAccountName(email)

        if args.name == user:
            raise utils.InvalidUserError(
                MACHINE_USERNAME_SAME_ERROR.format(user, args.name))

        # Warn user (This warning doesn't show for non-interactive sessions).
        message = RESET_PASSWORD_WARNING.format(user)
        prompt_string = (
            'Would you like to set or reset the password for [{0}]'.format(
                user))
        console_io.PromptContinue(message=message,
                                  prompt_string=prompt_string,
                                  cancel_on_no=True)

        log.status.Print(
            'Resetting and retrieving password for [{0}] on [{1}]'.format(
                user, args.name))

        # Get Encryption Keys.
        key = crypt.GetKeyPair()
        modulus, exponent = crypt.GetModulusExponentFromPublicKey(
            crypt.GetPublicKey(key))

        # Create Windows key entry.
        self.windows_key_entry = self._ConstructWindowsKeyEntry(
            user, modulus, exponent, email)

        # Call ReadWriteCommand.Run() which will fetch the instance and update
        # the metadata (using the data in self.windows_key_entry).
        instance_ref = self.CreateReference(client, holder.resources, args)
        get_request = self.GetGetRequest(client, instance_ref)

        objects = client.MakeRequests([get_request])

        new_object = self.Modify(client, objects[0])

        # If existing object is equal to the proposed object or if
        # Modify() returns None, then there is no work to be done, so we
        # print the resource and return.
        if objects[0] == new_object:
            log.status.Print(
                'No change requested; skipping update for [{0}].'.format(
                    objects[0].name))
            return objects

        updated_instance = client.MakeRequests(
            [self.GetSetRequest(client, instance_ref, new_object)])[0]

        # Retrieve and Decrypt the password from the serial console.
        enc_password = self._GetEncryptedPasswordFromSerialPort(
            client, instance_ref, modulus)
        password = crypt.DecryptMessage(key, enc_password)

        # Get External IP address.
        try:
            access_configs = updated_instance.networkInterfaces[
                0].accessConfigs
            external_ip_address = access_configs[0].natIP
        except (KeyError, IndexError) as _:
            log.warn(NO_IP_WARNING.format(updated_instance.name))
            external_ip_address = None

        # Check for old Windows credentials.
        if self.old_metadata_keys:
            log.warn(
                OLD_KEYS_WARNING.format(instance_ref.instance,
                                        instance_ref.instance,
                                        instance_ref.zone,
                                        ','.join(self.old_metadata_keys)))

        log.info('Total Elapsed Time: {0}'.format(time_util.CurrentTimeSec() -
                                                  start))

        # The connection info resource.
        connection_info = {
            'username': user,
            'password': password,
            'ip_address': external_ip_address
        }
        return connection_info
Example #3
 def _GetSslPolicy(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return self.SSL_POLICY_ARG.ResolveAsResource(
         args, holder.resources) if args.ssl_policy else None
Example #4
 def Run(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return _Run(args, holder, self._include_l7_internal_load_balancing,
                 self._include_log_config)
Example #5
 def Run(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     instance_ref = self._GetInstanceRef(holder, args)
     return self._GetInstance(holder, instance_ref)
Example #6
    def Run(self, args):
        """Returns a list of requests necessary for updating forwarding rules."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client.apitools_client
        messages = holder.client.messages

        forwarding_rule_ref = self.FORWARDING_RULE_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=compute_flags.GetDefaultScopeLister(holder.client))

        labels_diff = labels_util.Diff.FromUpdateArgs(args)
        if not labels_diff.MayHaveUpdates() and args.network_tier is None:
            raise calliope_exceptions.ToolException(
                'At least one property must be specified.')

        # Get replacement.
        if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
            get_request = (client.globalForwardingRules, 'Get',
                           messages.ComputeGlobalForwardingRulesGetRequest(
                               forwardingRule=forwarding_rule_ref.Name(),
                               project=forwarding_rule_ref.project))
            labels_value = messages.GlobalSetLabelsRequest.LabelsValue
        else:
            get_request = (client.forwardingRules, 'Get',
                           messages.ComputeForwardingRulesGetRequest(
                               forwardingRule=forwarding_rule_ref.Name(),
                               project=forwarding_rule_ref.project,
                               region=forwarding_rule_ref.region))
            labels_value = messages.RegionSetLabelsRequest.LabelsValue

        objects = holder.client.MakeRequests([get_request])
        forwarding_rule = objects[0]

        forwarding_rule_replacement = self.Modify(messages, args,
                                                  forwarding_rule)
        label_replacement = labels_diff.Apply(labels_value,
                                              forwarding_rule.labels)

        # Create requests.
        requests = []

        if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
            if forwarding_rule_replacement:
                request = messages.ComputeGlobalForwardingRulesPatchRequest(
                    forwardingRule=forwarding_rule_ref.Name(),
                    forwardingRuleResource=forwarding_rule_replacement,
                    project=forwarding_rule_ref.project)
                requests.append(
                    (client.globalForwardingRules, 'Patch', request))
            if label_replacement:
                request = self._CreateGlobalSetLabelsRequest(
                    messages, forwarding_rule_ref, forwarding_rule,
                    label_replacement)
                requests.append(
                    (client.globalForwardingRules, 'SetLabels', request))
        else:
            if forwarding_rule_replacement:
                request = messages.ComputeForwardingRulesPatchRequest(
                    forwardingRule=forwarding_rule_ref.Name(),
                    forwardingRuleResource=forwarding_rule_replacement,
                    project=forwarding_rule_ref.project,
                    region=forwarding_rule_ref.region)
                requests.append((client.forwardingRules, 'Patch', request))
            if label_replacement:
                request = self._CreateRegionalSetLabelsRequest(
                    messages, forwarding_rule_ref, forwarding_rule,
                    label_replacement)
                requests.append((client.forwardingRules, 'SetLabels', request))

        return holder.client.MakeRequests(requests)
Example #7
  def Run(self, args):
    # Manually ensure replace/incremental flags are mutually exclusive.
    router_utils.CheckIncompatibleFlagsOrRaise(args)

    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    messages = holder.client.messages
    service = holder.client.apitools_client.routers

    router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

    request_type = messages.ComputeRoutersGetRequest
    replacement = service.Get(request_type(**router_ref.AsDict()))
    existing_mode = replacement.bgp.advertiseMode

    if router_utils.HasReplaceAdvertisementFlags(args):
      mode, groups, ranges = router_utils.ParseAdvertisements(
          messages=messages, resource_class=messages.RouterBgp, args=args)

      router_utils.PromptIfSwitchToDefaultMode(
          messages=messages,
          resource_class=messages.RouterBgp,
          existing_mode=existing_mode,
          new_mode=mode)

      attrs = {
          'advertiseMode': mode,
          'advertisedGroups': groups,
          'advertisedIpRanges': ranges,
      }

      for attr, value in attrs.items():
        if value is not None:
          setattr(replacement.bgp, attr, value)

    if router_utils.HasIncrementalAdvertisementFlags(args):
      # This operation should only be run on custom mode routers.
      router_utils.ValidateCustomMode(
          messages=messages,
          resource_class=messages.RouterBgp,
          resource=replacement.bgp)

      # These arguments are guaranteed to be mutually exclusive in args.
      if args.add_advertisement_groups:
        groups_to_add = routers_utils.ParseGroups(
            resource_class=messages.RouterBgp,
            groups=args.add_advertisement_groups)
        replacement.bgp.advertisedGroups.extend(groups_to_add)

      if args.remove_advertisement_groups:
        groups_to_remove = routers_utils.ParseGroups(
            resource_class=messages.RouterBgp,
            groups=args.remove_advertisement_groups)
        router_utils.RemoveGroupsFromAdvertisements(
            messages=messages,
            resource_class=messages.RouterBgp,
            resource=replacement.bgp,
            groups=groups_to_remove)

      if args.add_advertisement_ranges:
        ip_ranges_to_add = routers_utils.ParseIpRanges(
            messages=messages, ip_ranges=args.add_advertisement_ranges)
        replacement.bgp.advertisedIpRanges.extend(ip_ranges_to_add)

      if args.remove_advertisement_ranges:
        router_utils.RemoveIpRangesFromAdvertisements(
            messages=messages,
            resource_class=messages.RouterBgp,
            resource=replacement.bgp,
            ip_ranges=args.remove_advertisement_ranges)

    # Cleared list fields need to be explicitly identified for Patch API.
    cleared_fields = []
    if not replacement.bgp.advertisedGroups:
      cleared_fields.append('bgp.advertisedGroups')
    if not replacement.bgp.advertisedIpRanges:
      cleared_fields.append('bgp.advertisedIpRanges')

    with holder.client.apitools_client.IncludeFields(cleared_fields):
      request_type = messages.ComputeRoutersPatchRequest
      result = service.Patch(
          request_type(
              project=router_ref.project,
              region=router_ref.region,
              router=router_ref.Name(),
              routerResource=replacement))

    operation_ref = resources.REGISTRY.Parse(
        result.name,
        collection='compute.regionOperations',
        params={
            'project': router_ref.project,
            'region': router_ref.region,
        })

    if args.async_:
      log.UpdatedResource(
          operation_ref,
          kind='router [{0}]'.format(router_ref.Name()),
          is_async=True,
          details='Run the [gcloud compute operations describe] command '
          'to check the status of this operation.')
      return result

    target_router_ref = holder.resources.Parse(
        router_ref.Name(),
        collection='compute.routers',
        params={
            'project': router_ref.project,
            'region': router_ref.region,
        })

    operation_poller = poller.Poller(service, target_router_ref)
    return waiter.WaitFor(operation_poller, operation_ref,
                          'Updating router [{0}]'.format(router_ref.Name()))
Example #8
    def Run(self, args):
        """See base.CreateCommand."""

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)
        network_ref = self.NETWORK_ARG.ResolveAsResource(
            args, holder.resources)

        router_resource = messages.Router(name=router_ref.Name(),
                                          description=args.description,
                                          network=network_ref.SelfLink(),
                                          bgp=messages.RouterBgp(asn=args.asn))

        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, ranges = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgp,
                args=args)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedIpRanges': ranges,
            }

            for attr, value in attrs.items():
                if value is not None:
                    setattr(router_resource.bgp, attr, value)

        result = service.Insert(
            messages.ComputeRoutersInsertRequest(router=router_resource,
                                                 region=router_ref.region,
                                                 project=router_ref.project))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            # Override the networks list format with the default operations format
            if not args.IsSpecified('format'):
                args.format = 'none'
            log.CreatedResource(
                operation_ref,
                kind='router [{0}]'.format(router_ref.Name()),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Creating router [{0}]'.format(router_ref.Name()))
Example #9
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        replacement = service.Get(request_type(**router_ref.AsDict()))

        # Retrieve specified NAT and update base fields.
        existing_nat = nats_utils.FindNatOrRaise(replacement, args.name)
        nat = nats_utils.UpdateNatMessage(existing_nat, args, holder)

        cleared_fields = []
        if args.clear_min_ports_per_vm:
            cleared_fields.append('minPortsPerVm')
        if args.clear_udp_idle_timeout:
            cleared_fields.append('udpIdleTimeoutSec')
        if args.clear_icmp_idle_timeout:
            cleared_fields.append('icmpIdleTimeoutSec')
        if args.clear_tcp_transitory_idle_timeout:
            cleared_fields.append('tcpTransitoryIdleTimeoutSec')
        if args.clear_tcp_established_idle_timeout:
            cleared_fields.append('tcpEstablishedIdleTimeoutSec')

        with holder.client.apitools_client.IncludeFields(cleared_fields):
            request_type = messages.ComputeRoutersPatchRequest
            result = service.Patch(
                request_type(project=router_ref.project,
                             region=router_ref.region,
                             router=router_ref.Name(),
                             routerResource=replacement))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            log.UpdatedResource(
                operation_ref,
                kind='nat [{0}] in router [{1}]'.format(
                    nat.name, router_ref.Name()),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating nat [{0}] in router [{1}]'.format(
                nat.name, router_ref.Name()))
Example #10
    def Run(self, args):
        """Default run method implementation."""
        super(Routes, self).Run(args)

        self._use_accounts_service = False

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        resource_registry = holder.resources
        ssh_helper = ssh_utils.BaseSSHCLIHelper()
        ssh_helper.Run(args)

        # Store the command's non-changing fields that are always needed.
        self._args = args
        self._ssh_helper = ssh_helper

        # We obtain generic parameters of the call
        project = properties.VALUES.core.project.GetOrFail()
        filters = _RoutesArgs.GetFilters(args)
        instances = _RoutesQueries.ObtainInstances(
            args.names,
            service=self.compute.instances,
            project=project,
            zones=args.zones,
            filters=filters,
            http=self.http,
            batch_url=self.batch_url)

        user = args.user
        if not user:
            user = ssh.GetDefaultSshUsername()

        # We unpack the flags
        dry_run = args.dry_run
        reverse_traceroute = args.reverse_traceroute
        traceroute_args = args.traceroute_args
        external_route_ip = args.external_route_ip

        internal_helpers.PrintHeader(instances)
        prompt = 'The following VMs will be tracerouted.'
        if instances and not dry_run and not console_io.PromptContinue(prompt):
            return
        # Sometimes the prompt would appear after the instance data
        log.out.flush()

        for instance in instances:
            header = 'Checking instance %s' % instance.name
            log.out.Print(header)
            log.out.Print('-' * len(header))

            try:
                self.TracerouteInstance(instance, traceroute_args, dry_run,
                                        resource_registry)
            except exceptions.ToolException as e:
                log.error('Error routing to instance')
                log.error(str(e))
                continue

            if reverse_traceroute:
                try:
                    has_traceroute = self.CheckTraceroute(
                        instance, user, dry_run, resource_registry)
                    if has_traceroute:
                        # We obtain the self ip
                        if not external_route_ip:
                            external_route_ip = self.ObtainSelfIp(
                                instance, user, dry_run, resource_registry)
                        if external_route_ip:
                            self.ReverseTracerouteInstance(
                                instance, user, external_route_ip,
                                traceroute_args, dry_run, resource_registry)
                        else:
                            log.out.Print(
                                'Unable to obtain self ip. Aborting.')
                    else:
                        log.out.Print(
                            'Please make sure traceroute is installed in PATH to move on.'
                        )
                except ssh.CommandError as e:
                    log.error(str(e))
            log.out.Print('')  # Separator
Example #11
    def Run(cls, release_track, args, support_redirect, support_rate_limit,
            support_header_action):
        """Validates arguments and patches a security policy rule."""
        modified_fields = [
            args.description, args.src_ip_ranges, args.expression, args.action,
            args.preview is not None
        ]
        min_args = [
            '--description', '--src-ip-ranges', '--expression', '--action',
            '--preview'
        ]
        if support_redirect:
            modified_fields.extend([args.redirect_type, args.redirect_target])
            min_args.extend(['--redirect-type', '--redirect-target'])
        if support_header_action:
            modified_fields.extend([args.request_headers_to_add])
            min_args.extend(['--request-headers-to-add'])
        if support_rate_limit:
            modified_fields.extend([
                args.rate_limit_threshold_count,
                args.rate_limit_threshold_interval_sec, args.conform_action,
                args.exceed_action, args.enforce_on_key,
                args.enforce_on_key_name, args.ban_threshold_count,
                args.ban_threshold_interval_sec, args.ban_duration_sec
            ])
            min_args.extend([
                '--rate-limit-threshold-count',
                '--rate-limit-threshold-interval-sec', '--conform-action',
                '--exceed-action', '--enforce-on-key', '--enforce-on-key-name',
                '--ban-threshold-count', '--ban-threshold-interval-sec',
                '--ban-duration-sec'
            ])
        if not any(modified_fields):
            raise exceptions.MinimumArgumentException(
                min_args, 'At least one property must be modified.')

        holder = base_classes.ComputeApiHolder(release_track)
        ref = holder.resources.Parse(
            args.name,
            collection='compute.securityPolicyRules',
            params={
                'project': properties.VALUES.core.project.GetOrFail,
                'securityPolicy': args.security_policy
            })
        security_policy_rule = client.SecurityPolicyRule(
            ref, compute_client=holder.client)

        redirect_options = None
        rate_limit_options = None
        if support_redirect:
            redirect_options = (security_policies_utils.CreateRedirectOptions(
                holder.client, args))
        if support_rate_limit:
            rate_limit_options = (
                security_policies_utils.CreateRateLimitOptions(
                    holder.client, args))

        request_headers_to_add = None
        if support_header_action:
            request_headers_to_add = args.request_headers_to_add

        return security_policy_rule.Patch(
            src_ip_ranges=args.src_ip_ranges,
            expression=args.expression,
            action=args.action,
            description=args.description,
            preview=args.preview,
            redirect_options=redirect_options,
            rate_limit_options=rate_limit_options,
            request_headers_to_add=request_headers_to_add)
Example #12
    def Run(self, args):
        """See ssh_utils.BaseSSHCommand.Run."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        ssh_helper = ssh_utils.BaseSSHHelper()
        ssh_helper.Run(args)
        ssh_helper.keys.EnsureKeysExist(args.force_key_file_overwrite,
                                        allow_passphrase=True)

        ssh_config_file = files.ExpandHomeDir(args.ssh_config_file
                                              or ssh.PER_USER_SSH_CONFIG_FILE)

        instances = None
        try:
            existing_content = files.ReadFileContents(ssh_config_file)
        except files.Error as e:
            existing_content = ''
            log.debug('SSH Config File [{0}] could not be opened: {1}'.format(
                ssh_config_file, e))

        if args.remove:
            compute_section = ''
            try:
                new_content = _RemoveComputeSection(existing_content)
            except MultipleComputeSectionsError:
                raise MultipleComputeSectionsError(ssh_config_file)
        else:
            ssh_helper.EnsureSSHKeyIsInProject(
                client, ssh.GetDefaultSshUsername(warn_on_account_user=True),
                None)
            instances = list(self.GetRunningInstances(client))
            if instances:
                compute_section = _BuildComputeSection(
                    instances, ssh_helper.keys.key_file,
                    ssh.KnownHosts.DEFAULT_PATH)
            else:
                compute_section = ''

        if existing_content and not args.remove:
            try:
                new_content = _MergeComputeSections(existing_content,
                                                    compute_section)
            except MultipleComputeSectionsError:
                raise MultipleComputeSectionsError(ssh_config_file)
        elif not existing_content:
            new_content = compute_section

        if args.dry_run:
            log.out.write(new_content or '')
            return

        if new_content != existing_content:
            if (os.path.exists(ssh_config_file)
                    and platforms.OperatingSystem.Current()
                    is not platforms.OperatingSystem.WINDOWS):
                ssh_config_perms = os.stat(ssh_config_file).st_mode
                # From `man 5 ssh_config`:
                #    this file must have strict permissions: read/write for the user,
                #    and not accessible by others.
                # We check that here:
                if not (ssh_config_perms & stat.S_IRWXU == stat.S_IWUSR
                        | stat.S_IRUSR and ssh_config_perms & stat.S_IWGRP == 0
                        and ssh_config_perms & stat.S_IWOTH == 0):
                    log.warning(
                        'Invalid permissions on [{0}]. Please change to match ssh '
                        'requirements (see man 5 ssh).'.format(ssh_config_file))
            # TODO(b/36050483): This write will not work very well if there is
            # a lot of write contention for the SSH config file. We should
            # add a function to do a better job at "atomic file writes".
            files.WriteFileContents(ssh_config_file, new_content, private=True)

        if compute_section:
            log.out.write(
                textwrap.dedent("""\
          You should now be able to use ssh/scp with your instances.
          For example, try running:

            $ ssh {alias}

          """.format(alias=_CreateAlias(instances[0]))))

        elif compute_section == '' and instances:  # pylint: disable=g-explicit-bool-comparison
            log.warning(
                'No host aliases were added to your SSH configs because instances'
                ' have no public IP.')

        elif not instances and not args.remove:
            log.warning(
                'No host aliases were added to your SSH configs because you do not '
                'have any running instances. Try running this command again after '
                'running some instances.')
Example #13
File: create.py Project: twinDev/order-api
 def Run(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return _Run(args, holder)
Example #14
 def Run(self, args):
     _CheckMissingArgument(args)
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return _Run(args, holder, self.SSL_CERTIFICATES_ARG,
                 self.TARGET_HTTPS_PROXY_ARG, self.URL_MAP_ARG,
                 self.SSL_POLICY_ARG)
Example #15
    def Run(self, args):
        """Returns a list of requests necessary for snapshotting disks."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())

        disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))
        if args.snapshot_names:
            if len(disk_refs) != len(args.snapshot_names):
                raise exceptions.ToolException(
                    '[--snapshot-names] must have the same number of values as disks '
                    'being snapshotted.')
            snapshot_names = args.snapshot_names
        else:
            # Generates names like "d52jsqy3db4q".
            snapshot_names = [
                name_generator.GenerateRandomName() for _ in disk_refs
            ]

        snapshot_refs = [
            holder.resources.Parse(
                snapshot_name,
                params={
                    'project': properties.VALUES.core.project.GetOrFail,
                },
                collection='compute.snapshots')
            for snapshot_name in snapshot_names
        ]

        client = holder.client.apitools_client
        messages = holder.client.messages

        requests = []

        for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
            # This feature is only exposed in alpha/beta
            allow_rsa_encrypted = self.ReleaseTrack() in [
                base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
            ]
            csek_keys = csek_utils.CsekKeyStore.FromArgs(
                args, allow_rsa_encrypted)
            disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                csek_keys, disk_ref, client)

            snapshot_message = messages.Snapshot(
                name=snapshot_ref.Name(),
                description=args.description,
                sourceDiskEncryptionKey=disk_key_or_none)
            if (hasattr(args, 'storage_location')
                    and args.IsSpecified('storage_location')):
                snapshot_message.storageLocations = [args.storage_location]

            if disk_ref.Collection() == 'compute.disks':
                request = messages.ComputeDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    zone=disk_ref.zone,
                    guestFlush=args.guest_flush)
                requests.append((client.disks, 'CreateSnapshot', request))
            elif disk_ref.Collection() == 'compute.regionDisks':
                request = messages.ComputeRegionDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    region=disk_ref.region)
                if hasattr(request,
                           'guestFlush'):  # only available in alpha API
                    guest_flush = getattr(args, 'guest_flush', None)
                    if guest_flush is not None:
                        request.guestFlush = guest_flush
                requests.append(
                    (client.regionDisks, 'CreateSnapshot', request))

        errors_to_collect = []
        responses = holder.client.BatchRequests(requests, errors_to_collect)
        for r in responses:
            err = getattr(r, 'error', None)
            if err:
                errors_to_collect.append(poller.OperationErrors(err.errors))
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        if args.async_:
            for operation_ref in operation_refs:
                log.status.Print('Disk snapshot in progress for [{}].'.format(
                    operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command '
                'to check the status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(holder.client, client.snapshots,
                                              snapshot_refs)
        return waiter.WaitFor(operation_poller,
                              poller.OperationBatch(operation_refs),
                              'Creating snapshot(s) {0}'.format(', '.join(
                                  s.Name() for s in snapshot_refs)),
                              max_wait_ms=None)
Example #16
    def Run(self, args):
        """Returns a list of requests necessary for adding images."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client
        messages = client.messages
        resource_parser = holder.resources

        image_ref = Create.DISK_IMAGE_ARG.ResolveAsResource(
            args, holder.resources)
        image = messages.Image(
            name=image_ref.image,
            description=args.description,
            sourceType=messages.Image.SourceTypeValueValuesEnum.RAW,
            family=args.family)

        csek_keys = csek_utils.CsekKeyStore.FromArgs(
            args, self._ALLOW_RSA_ENCRYPTED_CSEK_KEYS)
        if csek_keys:
            image.imageEncryptionKey = csek_utils.MaybeToMessage(
                csek_keys.LookupKey(
                    image_ref, raise_if_missing=args.require_csek_key_create),
                client.apitools_client)
        image.imageEncryptionKey = kms_utils.MaybeGetKmsKey(
            args, image_ref.project, client.apitools_client,
            image.imageEncryptionKey)

        # Validate parameters.
        if args.source_disk_zone and not args.source_disk:
            raise exceptions.ToolException(
                'You cannot specify [--source-disk-zone] unless you are specifying '
                '[--source-disk].')

        source_image_project = args.source_image_project
        source_image = args.source_image
        source_image_family = args.source_image_family

        if source_image_project and not (source_image or source_image_family):
            raise exceptions.ToolException(
                'You cannot specify [--source-image-project] unless you are '
                'specifying [--source-image] or [--source-image-family].')

        if source_image or source_image_family:
            image_expander = image_utils.ImageExpander(client, resource_parser)
            _, source_image_ref = image_expander.ExpandImageFlag(
                user_project=image_ref.project,
                image=source_image,
                image_family=source_image_family,
                image_project=source_image_project,
                return_image_resource=True)
            image.sourceImage = source_image_ref.selfLink
            image.sourceImageEncryptionKey = csek_utils.MaybeLookupKeyMessage(
                csek_keys, source_image_ref, client.apitools_client)

        # TODO(b/30086260): use resources.REGISTRY.Parse() for GCS URIs.
        if args.source_uri:
            source_uri = utils.NormalizeGoogleStorageUri(args.source_uri)
            image.rawDisk = messages.Image.RawDiskValue(source=source_uri)
        elif args.source_disk:
            source_disk_ref = flags.SOURCE_DISK_ARG.ResolveAsResource(
                args,
                holder.resources,
                scope_lister=compute_flags.GetDefaultScopeLister(client))
            image.sourceDisk = source_disk_ref.SelfLink()
            image.sourceDiskEncryptionKey = csek_utils.MaybeLookupKeyMessage(
                csek_keys, source_disk_ref, client.apitools_client)
        elif hasattr(args, 'source_snapshot') and args.source_snapshot:
            source_snapshot_ref = flags.SOURCE_SNAPSHOT_ARG.ResolveAsResource(
                args,
                holder.resources,
                scope_lister=compute_flags.GetDefaultScopeLister(client))
            image.sourceSnapshot = source_snapshot_ref.SelfLink()
            image.sourceSnapshotEncryptionKey = csek_utils.MaybeLookupKeyMessage(
                csek_keys, source_snapshot_ref, client.apitools_client)

        if args.licenses:
            image.licenses = args.licenses

        guest_os_features = getattr(args, 'guest_os_features', [])
        if guest_os_features:
            guest_os_feature_messages = []
            for feature in guest_os_features:
                gf_type = messages.GuestOsFeature.TypeValueValuesEnum(feature)
                guest_os_feature = messages.GuestOsFeature()
                guest_os_feature.type = gf_type
                guest_os_feature_messages.append(guest_os_feature)
            image.guestOsFeatures = guest_os_feature_messages

        request = messages.ComputeImagesInsertRequest(
            image=image, project=image_ref.project)

        args_labels = getattr(args, 'labels', None)
        if args_labels:
            labels = messages.Image.LabelsValue(additionalProperties=[
                messages.Image.LabelsValue.AdditionalProperty(key=key,
                                                              value=value)
                for key, value in sorted(args_labels.items())
            ])
            request.image.labels = labels

        # --force is in GA, --force-create is in beta and deprecated.
        if args.force or getattr(args, 'force_create', None):
            request.forceCreate = True

        return client.MakeRequests([(client.apitools_client.images, 'Insert',
                                     request)])
Example #17
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        ref = self.ORG_SECURITY_POLICY_ARG.ResolveAsResource(
            args, holder.resources, with_project=False)
        security_policy_rule_client = client.OrgSecurityPolicyRule(
            ref=ref, compute_client=holder.client)
        priority = rule_utils.ConvertPriorityToInt(ref.Name())
        src_ip_ranges = []
        dest_ip_ranges = []
        dest_port_list = []
        target_resources = []
        enable_logging = False
        should_setup_match = False
        traffic_direct = None
        matcher = None
        if args.IsSpecified('src_ip_ranges'):
            src_ip_ranges = args.src_ip_ranges
            should_setup_match = True
        if args.IsSpecified('dest_ip_ranges'):
            dest_ip_ranges = args.dest_ip_ranges
            should_setup_match = True
        if args.IsSpecified('dest_ports'):
            dest_port_list = rule_utils.ParseDestPorts(args.dest_ports,
                                                       holder.client.messages)
            should_setup_match = True
        if args.IsSpecified('target_resources'):
            target_resources = args.target_resources
        if args.IsSpecified('enable_logging'):
            enable_logging = True
        if args.IsSpecified('new_priority'):
            new_priority = rule_utils.ConvertPriorityToInt(args.new_priority)
        else:
            new_priority = priority

        # Construct a new matcher if needed.
        if should_setup_match:
            matcher = holder.client.messages.SecurityPolicyRuleMatcher(
                versionedExpr=holder.client.messages.SecurityPolicyRuleMatcher.
                VersionedExprValueValuesEnum.FIREWALL,
                config=holder.client.messages.SecurityPolicyRuleMatcherConfig(
                    srcIpRanges=src_ip_ranges,
                    destIpRanges=dest_ip_ranges,
                    destPorts=dest_port_list))
        if args.IsSpecified('direction'):
            if args.direction == 'INGRESS':
                traffic_direct = holder.client.messages.SecurityPolicyRule.DirectionValueValuesEnum.INGRESS
            else:
                traffic_direct = holder.client.messages.SecurityPolicyRule.DirectionValueValuesEnum.EGRESS

        security_policy_rule = holder.client.messages.SecurityPolicyRule(
            priority=new_priority,
            action=args.action,
            match=matcher,
            direction=traffic_direct,
            targetResources=target_resources,
            description=args.description,
            enableLogging=enable_logging)

        return security_policy_rule_client.Update(
            priority=priority,
            security_policy=args.security_policy,
            security_policy_rule=security_policy_rule)
Example #18
    def Run(self, args):
        compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        # Fail early if the requested image name is invalid or already exists.
        _CheckImageName(args.image_name)
        _CheckForExistingImage(args.image_name, compute_holder)

        storage_client = storage_api.StorageClient()
        daisy_bucket = daisy_utils.GetAndCreateDaisyBucket(
            storage_client=storage_client)
        image_uuid = uuid.uuid4()

        daisy_vars = ['image_name={}'.format(args.image_name)]
        if args.source_image:
            # If we're starting from an image, then we've already imported it.
            workflow = _IMPORT_FROM_IMAGE_WORKFLOW
            daisy_vars.append('translate_workflow={}'.format(
                _GetTranslateWorkflow(args)))
            ref = resources.REGISTRY.Parse(
                args.source_image,
                collection='compute.images',
                params={'project': properties.VALUES.core.project.GetOrFail})
            # source_name should be of the form 'global/images/image-name'.
            source_name = ref.RelativeName()[len(ref.Parent().RelativeName() +
                                                 '/'):]
            daisy_vars.append('source_image={}'.format(source_name))
        else:
            # If the file is an OVA file, print a warning.
            if args.source_file.endswith('.ova'):
                log.warning(
                    'The specified input file may contain more than one virtual disk. '
                    'Only the first vmdk disk will be imported.')
            elif (args.source_file.endswith('.tar.gz')
                  or args.source_file.endswith('.tgz')):
                raise exceptions.BadFileException(
                    '`gcloud compute images import` does not support compressed '
                    'archives. Please extract your image and try again.\n If you got '
                    'this file by exporting an image from Compute Engine (e.g. by '
                    'using `gcloud compute images export`) then you can instead use '
                    '`gcloud compute images create` to create your image from your '
                    '.tar.gz file.')

            # Get the image into the scratch bucket, wherever it is now.
            if _IsLocalFile(args.source_file):
                gcs_uri = _UploadToGcs(args.async_, args.source_file,
                                       daisy_bucket, image_uuid,
                                       storage_client)
            else:
                source_file = _MakeGcsUri(args.source_file)
                gcs_uri = _CopyToScratchBucket(source_file, image_uuid,
                                               storage_client, daisy_bucket)

            # Import and (maybe) translate from the scratch bucket.
            daisy_vars.append('source_disk_file={}'.format(gcs_uri))
            if args.data_disk:
                workflow = _IMPORT_WORKFLOW
            else:
                workflow = _IMPORT_AND_TRANSLATE_WORKFLOW
                daisy_vars.append('translate_workflow={}'.format(
                    _GetTranslateWorkflow(args)))

        self._ProcessAdditionalArgs(args, daisy_vars)

        # TODO(b/79591894): Once we've cleaned up the Argo output, replace this
        # warning message with a ProgressTracker spinner.
        log.warning('Importing image. This may take up to 2 hours.')
        return daisy_utils.RunDaisyBuild(args,
                                         workflow,
                                         ','.join(daisy_vars),
                                         daisy_bucket=daisy_bucket,
                                         user_zone=args.zone,
                                         output_filter=_OUTPUT_FILTER)
Example #19
 def Run(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return _Run(args, holder, self.TARGET_HTTPS_PROXY_ARG,
                 self.ReleaseTrack())
Example #20
 def Run(self, args):
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     instance_ref = self._GetInstanceRef(holder, args)
     guest_attributes_json = self._GetGuestInventoryGuestAttributes(
         holder, instance_ref)
     return self._GetFormattedGuestAttributes(guest_attributes_json)
Example #21
 def Run(self, args):
     """Issues the request necessary for adding the health check."""
     holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
     return _Run(args, holder, supports_response=True)
Example #22
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client
        messages = holder.client.messages

        if hasattr(args, 'project') and args.project:
            project = args.project
        else:
            project = properties.VALUES.core.project.GetOrFail()

        if hasattr(args, 'region') and args.region:
            region = args.region
        else:
            region = properties.VALUES.compute.region.GetOrFail()

        network = network_flags.NetworkArgumentForOtherResource(
            short_help=None).ResolveAsResource(args, holder.resources)
        network_ref = network.SelfLink() if network else None

        request = messages.ComputeRegionNetworkFirewallPoliciesGetEffectiveFirewallsRequest(
            project=project, region=region, network=network_ref)

        responses = client.MakeRequests([
            (client.apitools_client.regionNetworkFirewallPolicies,
             'GetEffectiveFirewalls', request)
        ])
        res = responses[0]
        network_firewall = []
        all_firewall_policy = []

        if hasattr(res, 'firewalls'):
            network_firewall = firewalls_utils.SortNetworkFirewallRules(
                client, res.firewalls)

        if hasattr(res, 'firewallPolicys') and res.firewallPolicys:
            for fp in res.firewallPolicys:
                firewall_policy_rule = firewalls_utils.SortFirewallPolicyRules(
                    client, fp.rules)
                fp_response = (
                    client.messages.
                    RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy(
                        name=fp.name, rules=firewall_policy_rule,
                        type=fp.type))
                all_firewall_policy.append(fp_response)

        if args.IsSpecified('format') and args.format == 'json':
            return client.messages.RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse(
                firewalls=network_firewall,
                firewallPolicys=all_firewall_policy)

        result = []
        for fp in all_firewall_policy:
            result.extend(
                firewalls_utils.ConvertFirewallPolicyRulesToEffectiveFwRules(
                    client,
                    fp,
                    True,
                    support_region_network_firewall_policy=True))
        result.extend(
            firewalls_utils.ConvertNetworkFirewallRulesToEffectiveFwRules(
                network_firewall))
        return result
Example #23
    def Run(self, args):
        """Creates and runs an InstanceTemplates.Insert request.

    Args:
      args: argparse.Namespace, An object that contains the values for the
          arguments specified in the .Args() method.

    Returns:
      A resource object dispatched by display.Displayer().
    """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        self.ValidateDiskFlags(args)
        instances_flags.ValidateLocalSsdFlags(args)
        instances_flags.ValidateNicFlags(args)
        instances_flags.ValidateServiceAccountAndScopeArgs(args)
        instances_flags.ValidateAcceleratorArgs(args)

        boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
        utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

        instance_template_ref = (Create.InstanceTemplateArg.ResolveAsResource(
            args, holder.resources))

        metadata = metadata_utils.ConstructMetadataMessage(
            client.messages,
            metadata=args.metadata,
            metadata_from_file=args.metadata_from_file)

        if hasattr(args, 'network_interface') and args.network_interface:
            network_interfaces = (
                instance_template_utils.CreateNetworkInterfaceMessages)(
                    resources=holder.resources,
                    scope_lister=flags.GetDefaultScopeLister(client),
                    messages=client.messages,
                    network_interface_arg=args.network_interface,
                    region=args.region,
                    support_network_tier=self._support_network_tier)
        else:
            network_tier = getattr(args, 'network_tier', None)
            network_interfaces = [
                instance_template_utils.CreateNetworkInterfaceMessage(
                    resources=holder.resources,
                    scope_lister=flags.GetDefaultScopeLister(client),
                    messages=client.messages,
                    network=args.network,
                    region=args.region,
                    subnet=args.subnet,
                    address=(instance_template_utils.EPHEMERAL_ADDRESS
                             if not args.no_address and not args.address else
                             args.address),
                    network_tier=network_tier)
            ]

        scheduling = instance_utils.CreateSchedulingMessage(
            messages=client.messages,
            maintenance_policy=args.maintenance_policy,
            preemptible=args.preemptible,
            restart_on_failure=args.restart_on_failure)

        if args.no_service_account:
            service_account = None
        else:
            service_account = args.service_account
        service_accounts = instance_utils.CreateServiceAccountMessages(
            messages=client.messages,
            scopes=[] if args.no_scopes else args.scopes,
            service_account=service_account)

        create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk
                                                                  or [])
        if create_boot_disk:
            image_expander = image_utils.ImageExpander(client,
                                                       holder.resources)
            try:
                image_uri, _ = image_expander.ExpandImageFlag(
                    user_project=instance_template_ref.project,
                    image=args.image,
                    image_family=args.image_family,
                    image_project=args.image_project,
                    return_image_resource=True)
            except utils.ImageNotFoundError as e:
                if args.IsSpecified('image_project'):
                    raise e
                image_uri, _ = image_expander.ExpandImageFlag(
                    user_project=instance_template_ref.project,
                    image=args.image,
                    image_family=args.image_family,
                    image_project=args.image_project,
                    return_image_resource=False)
                raise utils.ImageNotFoundError(
                    'The resource [{}] was not found. Is the image located in another '
                    'project? Use the --image-project flag to specify the '
                    'project where the image is located.'.format(image_uri))
        else:
            image_uri = None

        if args.tags:
            tags = client.messages.Tags(items=args.tags)
        else:
            tags = None

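        # Collect the attached disks: the default boot disk (if any) comes
        # first, followed by existing persistent disks, newly created disks,
        # and local SSDs.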
        persistent_disks = (
            instance_template_utils.CreatePersistentAttachedDiskMessages(
                client.messages, args.disk or []))

        persistent_create_disks = (
            instance_template_utils.CreatePersistentCreateDiskMessages(
                client, holder.resources, instance_template_ref.project,
                getattr(args, 'create_disk', [])))

        if create_boot_disk:
            boot_disk_list = [
                instance_template_utils.CreateDefaultBootAttachedDiskMessage(
                    messages=client.messages,
                    disk_type=args.boot_disk_type,
                    disk_device_name=args.boot_disk_device_name,
                    disk_auto_delete=args.boot_disk_auto_delete,
                    disk_size_gb=boot_disk_size_gb,
                    image_uri=image_uri)
            ]
        else:
            boot_disk_list = []

        local_ssds = []
        for x in args.local_ssd or []:
            local_ssd = instance_utils.CreateLocalSsdMessage(
                holder.resources, client.messages, x.get('device-name'),
                x.get('interface'), x.get('size'))
            local_ssds.append(local_ssd)

        disks = (boot_disk_list + persistent_disks + persistent_create_disks +
                 local_ssds)

        machine_type = instance_utils.InterpretMachineType(
            machine_type=args.machine_type,
            custom_cpu=args.custom_cpu,
            custom_memory=args.custom_memory,
            ext=getattr(args, 'custom_extensions', None))

        guest_accelerators = (
            instance_template_utils.CreateAcceleratorConfigMessages(
                client.messages, getattr(args, 'accelerator', None)))

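        # Assemble the final insert request from the pieces built above;
        # minCpuPlatform is only set afterwards when the flag was provided.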
        request = client.messages.ComputeInstanceTemplatesInsertRequest(
            instanceTemplate=client.messages.InstanceTemplate(
                properties=client.messages.InstanceProperties(
                    machineType=machine_type,
                    disks=disks,
                    canIpForward=args.can_ip_forward,
                    metadata=metadata,
                    networkInterfaces=network_interfaces,
                    serviceAccounts=service_accounts,
                    scheduling=scheduling,
                    tags=tags,
                    guestAccelerators=guest_accelerators,
                ),
                description=args.description,
                name=instance_template_ref.Name(),
            ),
            project=instance_template_ref.project)

        if getattr(args, 'min_cpu_platform', None):
            request.instanceTemplate.properties.minCpuPlatform = args.min_cpu_platform

        return client.MakeRequests([(client.apitools_client.instanceTemplates,
                                     'Insert', request)])

    def Run(self, args):
        instances_flags.ValidateDiskFlags(args, enable_kms=self._support_kms)
        instances_flags.ValidateLocalSsdFlags(args)
        instances_flags.ValidateNicFlags(args)
        instances_flags.ValidateServiceAccountAndScopeArgs(args)
        instances_flags.ValidateAcceleratorArgs(args)
        if self._support_network_tier:
            instances_flags.ValidateNetworkTierArgs(args)

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        compute_client = holder.client
        resource_parser = holder.resources

        instance_refs = instance_utils.GetInstanceRefs(args, compute_client,
                                                       holder)

        requests = self._CreateRequests(args, instance_refs, compute_client,
                                        resource_parser, holder)

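        # Synchronous path: MakeRequests also polls the operations, and a
        # nonexistent machine type gets a hint pointing at
        # `gcloud compute machine-types list`.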
        if not args.async:
            # TODO(b/63664449): Replace this with poller + progress tracker.
            try:
                # Using legacy MakeRequests (which also does polling) here until
                # replaced by api_lib.utils.waiter.
                return compute_client.MakeRequests(requests)
            except exceptions.ToolException as e:
                invalid_machine_type_message_regex = (
                    r'Invalid value for field \'resource.machineType\': .+. '
                    r'Machine type with name \'.+\' does not exist in zone \'.+\'\.'
                )
                if re.search(invalid_machine_type_message_regex, e.message):
                    raise exceptions.ToolException(
                        e.message +
                        '\nUse `gcloud compute machine-types list --zones` to see the '
                        'available machine types.')
                raise

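        # Asynchronous path: issue the inserts as one batch, collect any
        # per-operation errors, print the operation URIs, and return without
        # waiting for the instances to be created.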
        errors_to_collect = []
        responses = compute_client.BatchRequests(requests, errors_to_collect)
        for r in responses:
            err = getattr(r, 'error', None)
            if err:
                errors_to_collect.append(poller.OperationErrors(err.errors))
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        for instance_ref, operation_ref in zip(instance_refs, operation_refs):
            log.status.Print(
                'Instance creation in progress for [{}]: {}'.format(
                    instance_ref.instance, operation_ref.SelfLink()))
        log.status.Print(
            'Use [gcloud compute operations describe URI] command '
            'to check the status of the operation(s).')
        if not args.IsSpecified('format'):
            # Async output needs a separate format. The status messages above
            # already describe the operations, so there is nothing else that
            # needs to be printed.
            args.format = 'disable'
        return responses
Example #25
0
 def _GetApiHolder(cls, no_http=False):
     return base_classes.ComputeApiHolder(cls.ReleaseTrack())
Example #26
0
    def Run(self, args):
        """Issues requests necessary to update the HTTPS Health Checks."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

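        # Bounds-check the interval, timeout, and threshold flags before any
        # request is built.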
        if (args.check_interval is not None
                and (args.check_interval < CHECK_INTERVAL_LOWER_BOUND_SEC
                     or args.check_interval > CHECK_INTERVAL_UPPER_BOUND_SEC)):
            raise exceptions.ToolException(
                '[--check-interval] must not be less than {0} second or greater '
                'than {1} seconds; received [{2}] seconds.'.format(
                    CHECK_INTERVAL_LOWER_BOUND_SEC,
                    CHECK_INTERVAL_UPPER_BOUND_SEC, args.check_interval))

        if (args.timeout is not None
                and (args.timeout < TIMEOUT_LOWER_BOUND_SEC
                     or args.timeout > TIMEOUT_UPPER_BOUND_SEC)):
            raise exceptions.ToolException(
                '[--timeout] must not be less than {0} second or greater than {1} '
                'seconds; received: [{2}] seconds.'.format(
                    TIMEOUT_LOWER_BOUND_SEC, TIMEOUT_UPPER_BOUND_SEC,
                    args.timeout))

        if (args.healthy_threshold is not None
                and (args.healthy_threshold < THRESHOLD_LOWER_BOUND
                     or args.healthy_threshold > THRESHOLD_UPPER_BOUND)):
            raise exceptions.ToolException(
                '[--healthy-threshold] must be an integer between {0} and {1}, '
                'inclusive; received: [{2}].'.format(THRESHOLD_LOWER_BOUND,
                                                     THRESHOLD_UPPER_BOUND,
                                                     args.healthy_threshold))

        if (args.unhealthy_threshold is not None
                and (args.unhealthy_threshold < THRESHOLD_LOWER_BOUND
                     or args.unhealthy_threshold > THRESHOLD_UPPER_BOUND)):
            raise exceptions.ToolException(
                '[--unhealthy-threshold] must be an integer between {0} and {1}, '
                'inclusive; received [{2}].'.format(THRESHOLD_LOWER_BOUND,
                                                    THRESHOLD_UPPER_BOUND,
                                                    args.unhealthy_threshold))

        args_unset = not (args.port or args.request_path or args.check_interval
                          or args.timeout or args.healthy_threshold
                          or args.unhealthy_threshold)
        if args.description is None and args.host is None and args_unset:
            raise exceptions.ToolException(
                'At least one property must be modified.')

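        # Fetch the existing health check, apply the requested changes, and
        # skip the update entirely if nothing would change.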
        https_health_check_ref = self.HTTPS_HEALTH_CHECKS_ARG.ResolveAsResource(
            args, holder.resources)
        get_request = self.GetGetRequest(client, https_health_check_ref)

        objects = client.MakeRequests([get_request])

        new_object = self.Modify(client, args, objects[0])

        # If existing object is equal to the proposed object or if
        # Modify() returns None, then there is no work to be done, so we
        # print the resource and return.
        if objects[0] == new_object:
            log.status.Print(
                'No change requested; skipping update for [{0}].'.format(
                    objects[0].name))
            return objects

        return client.MakeRequests(
            [self.GetSetRequest(client, https_health_check_ref, new_object)])
Example #27
0
    def Run(self, args):
        """See base.UpdateCommand."""

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        replacement = service.Get(request_type(**router_ref.AsDict()))

        peer = _CreateBgpPeer(messages, args)

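        # When the replace-style advertisement flags are present, copy the
        # parsed mode, groups, and prefixes onto the new peer before it is
        # appended to the router.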
        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, prefixes = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgpPeer,
                args=args)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedPrefixs': prefixes,
            }

            for attr, value in attrs.items():
                if value is not None:
                    setattr(peer, attr, value)

        replacement.bgpPeers.append(peer)

        result = service.Patch(
            messages.ComputeRoutersPatchRequest(project=router_ref.project,
                                                region=router_ref.region,
                                                router=router_ref.Name(),
                                                routerResource=replacement))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

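        # With --async, report the pending operation and return immediately;
        # otherwise poll the region operation until the peer has been added.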
        if args.async:
            log.UpdatedResource(
                operation_ref,
                kind='router [{0}] to add peer [{1}]'.format(
                    router_ref.Name(), peer.name),
                async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Creating peer [{0}] in router [{1}]'.format(
                peer.name, router_ref.Name()))
Example #28
0
    def Run(self, args):
        # Manually ensure replace/incremental flags are mutually exclusive.
        router_utils.CheckIncompatibleFlagsOrRaise(args)

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        existing = service.Get(request_type(**ref.AsDict()))
        replacement = copy.deepcopy(existing)

        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, prefixes = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgp,
                args=args)

            router_utils.PromptIfSwitchToDefaultMode(
                messages=messages,
                resource_class=messages.RouterBgp,
                existing_mode=existing.bgp.advertiseMode,
                new_mode=mode)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedPrefixs': prefixes,
            }

            for attr, value in attrs.items():
                if value is not None:
                    setattr(replacement.bgp, attr, value)

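        # Incremental advertisement flags are only valid for routers already
        # in custom mode; add or remove the requested groups and IP ranges on
        # the copied resource.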
        if router_utils.HasIncrementalAdvertisementFlags(args):
            # This operation should only be run on custom mode routers.
            router_utils.ValidateCustomMode(messages=messages,
                                            resource_class=messages.RouterBgp,
                                            resource=replacement.bgp)

            # These arguments are guaranteed to be mutually exclusive in args.
            if args.add_advertisement_groups:
                groups_to_add = router_utils.ParseGroups(
                    resource_class=messages.RouterBgp,
                    groups=args.add_advertisement_groups)
                replacement.bgp.advertisedGroups.extend(groups_to_add)

            if args.remove_advertisement_groups:
                groups_to_remove = router_utils.ParseGroups(
                    resource_class=messages.RouterBgp,
                    groups=args.remove_advertisement_groups)
                router_utils.RemoveGroupsFromAdvertisements(
                    messages=messages,
                    resource_class=messages.RouterBgp,
                    resource=replacement.bgp,
                    groups=groups_to_remove)

            if args.add_advertisement_ranges:
                ip_ranges_to_add = router_utils.ParseIpRanges(
                    messages=messages, ip_ranges=args.add_advertisement_ranges)
                replacement.bgp.advertisedPrefixs.extend(ip_ranges_to_add)

            if args.remove_advertisement_ranges:
                router_utils.RemoveIpRangesFromAdvertisements(
                    messages=messages,
                    resource_class=messages.RouterBgp,
                    resource=replacement.bgp,
                    ip_ranges=args.remove_advertisement_ranges)

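        # Patch with IncludeFields so the advertisedGroups/advertisedPrefixs
        # fields are serialized even when they end up empty (e.g. after a
        # removal); otherwise the API would never see the cleared values.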
        request_type = messages.ComputeRoutersPatchRequest
        include_fields = [
            'bgp.advertisedGroups',
            'bgp.advertisedPrefixs',
        ]
        with holder.client.apitools_client.IncludeFields(include_fields):
            resource = service.Patch(
                request_type(project=ref.project,
                             region=ref.region,
                             router=ref.Name(),
                             routerResource=replacement))

        return resource
Example #29
0
    def _Run(self,
             args,
             supports_kms_keys=False,
             supports_physical_block=False,
             support_shared_disk=False,
             support_vss_erase=False):
        compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = compute_holder.client

        self.show_unformated_message = not (
            args.IsSpecified('image') or args.IsSpecified('image_family')
            or args.IsSpecified('source_snapshot'))
        if self.source_disk_enabled:
            self.show_unformated_message = self.show_unformated_message and not (
                args.IsSpecified('source_disk'))

        disk_refs = self.ValidateAndParseDiskRefs(args, compute_holder)
        from_image = self.GetFromImage(args)
        size_gb = self.GetDiskSizeGb(args, from_image)
        self.WarnAboutScopeDeprecationsAndMaintenance(disk_refs, client)
        project_to_source_image = self.GetProjectToSourceImageDict(
            args, disk_refs, compute_holder, from_image)
        snapshot_uri = self.GetSnapshotUri(args, compute_holder)

        # These features are only exposed in alpha/beta; ideally the code
        # supporting them would live only in the alpha and beta versions of
        # the command.
        labels = self.GetLabels(args, client)

        allow_rsa_encrypted = self.ReleaseTrack() in [
            base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
        ]
        csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)

        for project in project_to_source_image:
            source_image_uri = project_to_source_image[project].uri
            project_to_source_image[project].keys = (
                csek_utils.MaybeLookupKeyMessagesByUri(
                    csek_keys, compute_holder.resources,
                    [source_image_uri, snapshot_uri], client.apitools_client))

        # end of alpha/beta features.

        guest_os_feature_messages = _ParseGuestOsFeaturesToMessages(
            args, client.messages)

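        # Build one insert request per disk, dispatching to the zonal or
        # regional disks service, and send them all at the end.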
        requests = []
        for disk_ref in disk_refs:
            type_uri = self.GetDiskTypeUri(args, disk_ref, compute_holder)

            # These features are only exposed in alpha/beta; ideally the code
            # supporting them would live only in the alpha and beta versions
            # of the command.
            # TODO(b/65161039): Stop checking release path in the middle of GA code.
            kwargs = {}
            if csek_keys:
                disk_key_or_none = csek_keys.LookupKey(
                    disk_ref, args.require_csek_key_create)
                disk_key_message_or_none = csek_utils.MaybeToMessage(
                    disk_key_or_none, client.apitools_client)
                kwargs['diskEncryptionKey'] = disk_key_message_or_none
                kwargs['sourceImageEncryptionKey'] = (
                    project_to_source_image[disk_ref.project].keys[0])
                kwargs['sourceSnapshotEncryptionKey'] = (
                    project_to_source_image[disk_ref.project].keys[1])
            if labels:
                kwargs['labels'] = labels

            if supports_kms_keys:
                kwargs['diskEncryptionKey'] = kms_utils.MaybeGetKmsKey(
                    args, client.messages, kwargs.get('diskEncryptionKey',
                                                      None))

            # end of alpha/beta features.

            if supports_physical_block and args.IsSpecified(
                    'physical_block_size'):
                physical_block_size_bytes = int(args.physical_block_size)
            else:
                physical_block_size_bytes = None

            resource_policies = getattr(args, 'resource_policies', None)
            if resource_policies:
                if disk_ref.Collection() == 'compute.regionDisks':
                    disk_region = disk_ref.region
                else:
                    disk_region = utils.ZoneNameToRegionName(disk_ref.zone)
                parsed_resource_policies = []
                for policy in resource_policies:
                    resource_policy_ref = resource_util.ParseResourcePolicy(
                        compute_holder.resources,
                        policy,
                        project=disk_ref.project,
                        region=disk_region)
                    parsed_resource_policies.append(
                        resource_policy_ref.SelfLink())
                kwargs['resourcePolicies'] = parsed_resource_policies

            disk = client.messages.Disk(
                name=disk_ref.Name(),
                description=args.description,
                sizeGb=size_gb,
                sourceSnapshot=snapshot_uri,
                type=type_uri,
                physicalBlockSizeBytes=physical_block_size_bytes,
                **kwargs)
            if self.source_disk_enabled:
                source_disk_ref = self.GetSourceDiskUri(args, compute_holder)
                disk.sourceDisk = source_disk_ref
            if (support_shared_disk
                    and disk_ref.Collection() == 'compute.regionDisks'
                    and args.IsSpecified('multi_writer')):
                raise exceptions.InvalidArgumentException(
                    '--multi-writer',
                    ('--multi-writer can be used only with --zone flag'))

            if (support_shared_disk
                    and disk_ref.Collection() == 'compute.disks'
                    and args.IsSpecified('multi_writer')):
                disk.multiWriter = args.multi_writer

            if guest_os_feature_messages:
                disk.guestOsFeatures = guest_os_feature_messages

            if support_vss_erase and args.IsSpecified(
                    'erase_windows_vss_signature'):
                disk.eraseWindowsVssSignature = args.erase_windows_vss_signature

            disk.licenses = self.ParseLicenses(args)

            if disk_ref.Collection() == 'compute.disks':
                request = client.messages.ComputeDisksInsertRequest(
                    disk=disk,
                    project=disk_ref.project,
                    sourceImage=project_to_source_image[disk_ref.project].uri,
                    zone=disk_ref.zone)

                request = (client.apitools_client.disks, 'Insert', request)
            elif disk_ref.Collection() == 'compute.regionDisks':
                disk.replicaZones = self.GetReplicaZones(
                    args, compute_holder, disk_ref)
                request = client.messages.ComputeRegionDisksInsertRequest(
                    disk=disk,
                    project=disk_ref.project,
                    sourceImage=project_to_source_image[disk_ref.project].uri,
                    region=disk_ref.region)

                request = (client.apitools_client.regionDisks, 'Insert',
                           request)

            requests.append(request)

        return client.MakeRequests(requests)
Example #30
0
    def _SendRequests(self,
                      args,
                      quic_override=None,
                      ssl_policy=None,
                      clear_ssl_policy=False):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

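        # Each provided setting becomes its own sub-request, so only the
        # aspects of the target HTTPS proxy that were specified get updated.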
        requests = []
        target_https_proxy_ref = self.TARGET_HTTPS_PROXY_ARG.ResolveAsResource(
            args, holder.resources)

        if args.ssl_certificates:
            ssl_cert_refs = self.SSL_CERTIFICATES_ARG.ResolveAsResource(
                args, holder.resources)
            requests.append(
                (client.apitools_client.targetHttpsProxies,
                 'SetSslCertificates',
                 client.messages.
                 ComputeTargetHttpsProxiesSetSslCertificatesRequest(
                     project=target_https_proxy_ref.project,
                     targetHttpsProxy=target_https_proxy_ref.Name(),
                     targetHttpsProxiesSetSslCertificatesRequest=(
                         client.messages.
                         TargetHttpsProxiesSetSslCertificatesRequest(
                             sslCertificates=[
                                 ref.SelfLink() for ref in ssl_cert_refs
                             ])))))

        if args.url_map:
            url_map_ref = self.URL_MAP_ARG.ResolveAsResource(
                args, holder.resources)
            requests.append(
                (client.apitools_client.targetHttpsProxies, 'SetUrlMap',
                 client.messages.ComputeTargetHttpsProxiesSetUrlMapRequest(
                     project=target_https_proxy_ref.project,
                     targetHttpsProxy=target_https_proxy_ref.Name(),
                     urlMapReference=client.messages.UrlMapReference(
                         urlMap=url_map_ref.SelfLink()))))

        if quic_override:
            requests.append(
                (client.apitools_client.targetHttpsProxies, 'SetQuicOverride',
                 client.messages.
                 ComputeTargetHttpsProxiesSetQuicOverrideRequest(
                     project=target_https_proxy_ref.project,
                     targetHttpsProxy=target_https_proxy_ref.Name(),
                     targetHttpsProxiesSetQuicOverrideRequest=(
                         client.messages.
                         TargetHttpsProxiesSetQuicOverrideRequest(
                             quicOverride=quic_override)))))

        if ssl_policy or clear_ssl_policy:
            requests.append(
                (client.apitools_client.targetHttpsProxies, 'SetSslPolicy',
                 client.messages.ComputeTargetHttpsProxiesSetSslPolicyRequest(
                     project=target_https_proxy_ref.project,
                     targetHttpsProxy=target_https_proxy_ref.Name(),
                     sslPolicyReference=ssl_policy)))

        return client.MakeRequests(requests)