def __init__(self, defaults=None, symbols=None, compiler=None):
  """Initializes a projection.

  Args:
    defaults: resource_projection_spec.ProjectionSpec defaults.
    symbols: Transform function symbol table dict indexed by function name.
    compiler: The projection compiler method for nested projections.
  """
  self.aliases = {}
  self.attributes = {}
  self._columns = []
  self._compiler = compiler
  self._empty = None
  self._name = None
  self._snake_headings = {}
  self._snake_re = None
  if defaults:
    # Inherit the activation state and a private deep copy of the defaults'
    # projection tree so later edits don't mutate the shared defaults.
    self._active = defaults.active
    self._tree = copy.deepcopy(defaults.GetRoot())
    self.Defaults()
    # Deep-copy the caller's symbols before overlaying the defaults' symbols,
    # so the update below can't leak into the caller's dict.
    self.symbols = copy.deepcopy(symbols) if symbols else {}
    if defaults.symbols:
      self.symbols.update(defaults.symbols)
    self.aliases.update(defaults.aliases)
  else:
    self._active = 0
    self._tree = None
    # NOTE(review): unlike the defaults branch, symbols is NOT copied here
    # and may be None — confirm callers expect to share this dict.
    self.symbols = symbols
def Modify(self, args, existing):
  """Returns a copy of existing with the supplied flag values folded in."""
  updated = copy.deepcopy(existing)

  # An empty --description clears the field; a non-empty one replaces it;
  # an absent one (None) leaves it untouched.
  if args.description is not None:
    updated.description = args.description or None

  health_checks = backend_services_utils.GetHealthChecks(args, self)
  if health_checks:
    updated.healthChecks = health_checks

  # Simple truthy flags map straight onto message fields.
  for flag_value, attr in ((args.timeout, 'timeoutSec'),
                           (args.port, 'port'),
                           (args.port_name, 'portName')):
    if flag_value:
      setattr(updated, attr, flag_value)

  if args.protocol:
    updated.protocol = self.messages.BackendService.ProtocolValueValuesEnum(
        args.protocol)

  return updated
def Modify(self, args, existing):
  """Returns a copy of existing with the requested disk detached."""
  replacement = copy.deepcopy(existing)

  if args.disk:
    # Match by the disk's fully-qualified URI.
    disk_ref = self.CreateZonalReference(
        args.disk, self.ref.zone, resource_type='disks')
    target_link = disk_ref.SelfLink()
    kept = [d for d in existing.disks if d.source != target_link]
    if len(kept) == len(existing.disks):
      raise exceptions.ToolException(
          'Disk [{0}] is not attached to instance [{1}] in zone [{2}].'
          .format(disk_ref.Name(), self.ref.Name(), self.ref.zone))
  else:
    # Match by device name instead.
    kept = [d for d in existing.disks if d.deviceName != args.device_name]
    if len(kept) == len(existing.disks):
      raise exceptions.ToolException(
          'No disk with device name [{0}] is attached to instance [{1}] in '
          'zone [{2}].'
          .format(args.device_name, self.ref.Name(), self.ref.zone))

  replacement.disks = kept
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with autoDelete set on the matching disks."""
  replacement = copy.deepcopy(existing)

  if args.disk:
    disk_ref = self.CreateZonalReference(
        args.disk, self.ref.zone, resource_type='disks')
    self_link = disk_ref.SelfLink()
    matched = [d for d in replacement.disks if d.source == self_link]
    if not matched:
      raise exceptions.ToolException(
          'Disk [{0}] is not attached to instance [{1}] in zone [{2}].'
          .format(disk_ref.Name(), self.ref.Name(), self.ref.zone))
  else:
    matched = [d for d in replacement.disks
               if d.deviceName == args.device_name]
    if not matched:
      raise exceptions.ToolException(
          'No disk with device name [{0}] is attached to instance [{1}] '
          'in zone [{2}].'
          .format(args.device_name, self.ref.Name(), self.ref.zone))

  # As in the original loop, every matching disk is updated.
  for disk in matched:
    disk.autoDelete = args.auto_delete
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with the requested tags removed."""
  new_object = copy.deepcopy(existing)
  if args.all:
    # --all wipes every tag.
    new_object.tags.items = []
  else:
    remaining = set(new_object.tags.items).difference(args.tags)
    new_object.tags.items = sorted(remaining)
  return new_object
def Modify(self, args, existing):
  """Returns a copy of existing with a new backend appended."""
  backend_flags.WarnOnDeprecatedFlags(args)
  replacement = copy.deepcopy(existing)

  group_ref = self.CreateGroupReference(args)
  group_uri = group_ref.SelfLink()

  # Reject the request up front if the group is already a backend.
  for current in existing.backends:
    if current.group == group_uri:
      raise exceptions.ToolException(
          'Backend [{0}] in zone [{1}] already exists in backend service '
          '[{2}].'.format(group_ref.Name(), group_ref.zone, args.name))

  balancing_mode = None
  if args.balancing_mode:
    balancing_mode = self.messages.Backend.BalancingModeValueValuesEnum(
        args.balancing_mode)

  replacement.backends.append(
      self.CreateBackendMessage(group_uri, balancing_mode, args))
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with a new Backend message appended."""
  replacement = copy.deepcopy(existing)

  group_ref = self.CreateGroupReference(args)
  group_uri = group_ref.SelfLink()

  # Duplicate group URIs are an error.
  for current in existing.backends:
    if current.group == group_uri:
      raise exceptions.ToolException(
          'Backend [{0}] in zone [{1}] already exists in backend service '
          '[{2}].'.format(args.instance_group, args.zone, args.name))

  balancing_mode = None
  if args.balancing_mode:
    balancing_mode = self.messages.Backend.BalancingModeValueValuesEnum(
        args.balancing_mode)

  replacement.backends.append(self.messages.Backend(
      balancingMode=balancing_mode,
      capacityScaler=args.capacity_scaler,
      description=args.description,
      group=group_uri,
      maxRate=args.max_rate,
      maxRatePerInstance=args.max_rate_per_instance,
      maxUtilization=args.max_utilization))
  return replacement
def Clone(self):
  """Fully clones this registry.

  The parser dicts are copied with _CopyNestedDictSpine (presumably a
  structure-only copy — the parser objects appear to be shared; confirm
  against _CopyNestedDictSpine) and registered_apis is deep-copied.
  Each parser's params_defaults_func is then rebound so defaults resolve
  against the NEW registry rather than the original.

  Returns:
    Registry: an independent copy of this registry.
  """
  reg = Registry(
      parsers_by_collection=_CopyNestedDictSpine(self.parsers_by_collection),
      parsers_by_url=_CopyNestedDictSpine(self.parsers_by_url),
      default_param_funcs=_CopyNestedDictSpine(self.default_param_funcs),
      registered_apis=copy.deepcopy(self.registered_apis))
  # Rebind the collection-keyed parsers (api -> version -> collection).
  # Python 2 idiom: iteritems().
  for _, version_collections in reg.parsers_by_collection.iteritems():
    for _, collection_parsers in version_collections.iteritems():
      for _, parser in collection_parsers.iteritems():
        parser.params_defaults_func = functools.partial(
            reg.GetParamDefault,
            parser.collection_info.api_name,
            parser.collection_info.name)
  def _UpdateParser(dict_or_parser):
    # Recursively walk the URL-keyed tree; non-dict nodes are parsers.
    if type(dict_or_parser) is types.DictType:
      for _, val in dict_or_parser.iteritems():
        _UpdateParser(val)
    else:
      dict_or_parser.params_defaults_func = functools.partial(
          reg.GetParamDefault,
          dict_or_parser.collection_info.api_name,
          dict_or_parser.collection_info.name)
  _UpdateParser(reg.parsers_by_url)
  return reg
def Modify(self, args, existing):
  """Returns a copy of existing with one router interface updated."""
  replacement = copy.deepcopy(existing)

  iface = next((i for i in replacement.interfaces
                if i.name == args.interface_name), None)
  if iface is None:
    raise InterfaceNotFoundError(args.interface_name)

  # Valid mask lengths are 0..31.
  if args.mask_length is not None and not 0 <= args.mask_length <= 31:
    raise exceptions.Error(
        '--mask-length must be a non-negative integer less than 32')

  if args.ip_address is not None:
    if args.mask_length is None:
      raise RequireMaskError()
    iface.ipRange = '{0}/{1}'.format(args.ip_address, args.mask_length)

  if args.vpn_tunnel is not None:
    vpn_ref = self.CreateRegionalReference(
        args.vpn_tunnel, args.region, resource_type='vpnTunnels')
    iface.linkedVpnTunnel = vpn_ref.SelfLink()

  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with the requested disk removed."""
  replacement = copy.deepcopy(existing)

  if args.disk:
    disk_ref = self.resources.Parse(
        args.disk, collection='compute.disks',
        params={'zone': self.ref.zone})
    link = disk_ref.SelfLink()
    survivors = [d for d in existing.disks if d.source != link]
    if len(survivors) == len(existing.disks):
      raise exceptions.ToolException(
          'Disk [{0}] is not attached to instance [{1}] in zone [{2}].'
          .format(disk_ref.Name(), self.ref.Name(), self.ref.zone))
  else:
    survivors = [d for d in existing.disks
                 if d.deviceName != args.device_name]
    if len(survivors) == len(existing.disks):
      raise exceptions.ToolException(
          'No disk with device name [{0}] is attached to instance [{1}] in '
          'zone [{2}].'
          .format(args.device_name, self.ref.Name(), self.ref.zone))

  replacement.disks = survivors
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with the matching backend removed."""
  backend_flags.WarnOnDeprecatedFlags(args)
  replacement = copy.deepcopy(existing)

  group_ref = self.CreateGroupReference(args)
  group_uri = group_ref.SelfLink()

  backend_idx = None
  for i, backend in enumerate(existing.backends):
    if backend.group == group_uri:
      backend_idx = i

  if backend_idx is not None:
    replacement.backends.pop(backend_idx)
    return replacement

  # Not found: name the group's scope (region or zone) in the error.
  scope_value = getattr(group_ref, 'region', None)
  if scope_value is not None:
    scope = 'region'
  else:
    scope_value = getattr(group_ref, 'zone', None)
    scope = 'zone'
  raise exceptions.ToolException(
      'Backend [{0}] in {1} [{2}] is not a backend of backend service '
      '[{3}].'.format(group_ref.Name(), scope, scope_value,
                      self.ref.Name()))
def Modify(self, args, existing):
  """Returns a copy of existing with a BGP peer and its interface added."""
  replacement = copy.deepcopy(existing)

  # By convention the interface name will be if-name_of_peer.
  interface_name = 'if-' + args.peer_name

  mask = None
  if args.ip_address is not None:
    if args.mask_length is None:
      raise exceptions.ToolException(
          '--mask-length must be set if --ip-address is set')
    mask = '{0}/{1}'.format(args.ip_address, args.mask_length)

  peer = self.messages.RouterBgpPeer(
      name=args.peer_name,
      interfaceName=interface_name,
      ipAddress=args.ip_address,
      peerIpAddress=args.peer_ip_address,
      peerAsn=args.peer_asn)

  vpn_ref = self.CreateRegionalReference(
      args.vpn_tunnel, args.region, resource_type='vpnTunnels')
  interface = self.messages.RouterInterface(
      name=interface_name,
      linkedVpnTunnel=vpn_ref.SelfLink(),
      ipRange=mask)

  replacement.bgpPeers.append(peer)
  replacement.interfaces.append(interface)
  return replacement
def Modify(self, args, existing):
  """Returns a modified URL map message."""
  replacement = copy.deepcopy(existing)

  # Drop the named path matcher; error out if it was never there.
  kept_matchers = [pm for pm in existing.pathMatchers
                   if pm.name != args.path_matcher_name]
  if len(kept_matchers) == len(existing.pathMatchers):
    raise exceptions.ToolException(
        'No path matcher with the name [{0}] was found.'.format(
            args.path_matcher_name))
  replacement.pathMatchers = kept_matchers

  # Drop every host rule that referenced the removed matcher.
  replacement.hostRules = [
      rule for rule in existing.hostRules
      if rule.pathMatcher != args.path_matcher_name]
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with one BGP peer's fields updated."""
  replacement = copy.deepcopy(existing)

  peer = next((p for p in replacement.bgpPeers
               if p.name == args.peer_name), None)
  if peer is None:
    raise PeerNotFoundError(args.peer_name)

  # Only explicitly-set flags overwrite the corresponding peer fields.
  if args.interface is not None:
    peer.interfaceName = args.interface
  if args.ip_address is not None:
    peer.ipAddress = args.ip_address
  if args.peer_ip_address is not None:
    peer.peerIpAddress = args.peer_ip_address
  if args.peer_asn is not None:
    peer.peerAsn = args.peer_asn
  if args.advertised_route_priority is not None:
    peer.advertisedRoutePriority = args.advertised_route_priority

  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with a new router interface appended.

  Args:
    args: argparse namespace with interface_name, mask_length, ip_address,
        vpn_tunnel and region values.
    existing: the router message to copy.

  Returns:
    A deep copy of existing with the new interface appended.

  Raises:
    exceptions.ToolException: if the mask length is out of range, or if
        --ip-address was given without --mask-length.
  """
  replacement = copy.deepcopy(existing)
  mask = None
  interface_name = args.interface_name

  # Valid mask lengths are 0..31.
  if args.mask_length is not None:
    if args.mask_length < 0 or args.mask_length > 31:
      raise exceptions.ToolException(
          '--mask-length must be a non-negative integer less than 32')

  if args.ip_address is not None:
    if args.mask_length is not None:
      mask = '{0}/{1}'.format(args.ip_address, args.mask_length)
    else:
      # BUG FIX: ToolException was previously given TWO positional
      # arguments ('--ip-address', message), which renders the message as
      # a tuple. Pass a single message string, matching the wording used
      # by the add-bgp-peer subcommand.
      raise exceptions.ToolException(
          '--mask-length must be set if --ip-address is set')

  vpn_ref = self.CreateRegionalReference(
      args.vpn_tunnel, args.region, resource_type='vpnTunnels')
  interface = self.messages.RouterInterface(
      name=interface_name,
      linkedVpnTunnel=vpn_ref.SelfLink(),
      ipRange=mask)
  replacement.interfaces.append(interface)
  return replacement
def Modify(self, args, existing):
  """Override. See base class, ReadWriteCommand.

  Locates the backend whose group matches the requested instance group in a
  deep copy of the backend service, then applies the description and
  balancing-mode flag updates to it in place.
  """
  backend_flags.WarnOnDeprecatedFlags(args)
  replacement = copy.deepcopy(existing)

  group_ref = self.CreateGroupReference(args)

  backend_to_update = None
  for backend in replacement.backends:
    if group_ref.SelfLink() == backend.group:
      backend_to_update = backend

  if not backend_to_update:
    # Work out whether the reference is zonal or regional so the error
    # names the right scope; if both attributes exist, region wins.
    scope_type = None
    scope_name = None
    if hasattr(group_ref, 'zone'):
      scope_type = 'zone'
      scope_name = group_ref.zone
    if hasattr(group_ref, 'region'):
      scope_type = 'region'
      scope_name = group_ref.region
    raise exceptions.ToolException(
        'No backend with name [{0}] in {1} [{2}] is part of the backend '
        'service [{3}].'.format(
            group_ref.Name(), scope_type, scope_name, self.ref.Name()))

  # Empty --description clears the field; non-empty replaces it; absent
  # (None) leaves it alone.
  if args.description:
    backend_to_update.description = args.description
  elif args.description is not None:
    backend_to_update.description = None

  self.ModifyBalancingModeArgs(args, backend_to_update)

  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with autoDelete set on the matching disks."""
  replacement = copy.deepcopy(existing)

  if args.disk:
    disk_ref = self.resources.Parse(
        args.disk, collection='compute.disks',
        params={'zone': self.ref.zone})
    link = disk_ref.SelfLink()
    targets = [d for d in replacement.disks if d.source == link]
    if not targets:
      raise exceptions.ToolException(
          'Disk [{0}] is not attached to instance [{1}] in zone [{2}].'
          .format(disk_ref.Name(), self.ref.Name(), self.ref.zone))
  else:
    targets = [d for d in replacement.disks
               if d.deviceName == args.device_name]
    if not targets:
      raise exceptions.ToolException(
          'No disk with device name [{0}] is attached to instance [{1}] '
          'in zone [{2}].'
          .format(args.device_name, self.ref.Name(), self.ref.zone))

  # As in the original loop, every matching disk is updated.
  for disk in targets:
    disk.autoDelete = args.auto_delete
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with the matching backend removed.

  Args:
    args: argparse namespace; either the deprecated --group flag or
        --instance-group identifies the backend's instance group.
    existing: the backend service message to copy.

  Returns:
    A deep copy of existing without the matching backend.

  Raises:
    exceptions.ToolException: if no backend references the group.
  """
  replacement = copy.deepcopy(existing)

  if args.group is not None:
    log.warn('The --group flag is deprecated and will be removed. '
             'Please use --instance-group instead.')
    group_ref = self.CreateZonalReference(
        args.group, args.zone, resource_type='zoneViews')
  else:
    group_ref = self.CreateGroupReference(args)
  group_uri = group_ref.SelfLink()

  backend_idx = None
  for i, backend in enumerate(existing.backends):
    if group_uri == backend.group:
      backend_idx = i

  if backend_idx is None:
    # BUG FIX: this message previously interpolated args.group, which is
    # None when --instance-group was used; report the resolved group name
    # instead.
    raise exceptions.ToolException(
        'Backend [{0}] in zone [{1}] is not a backend of backend service '
        '[{2}].'.format(group_ref.Name(), args.zone, args.name))
  replacement.backends.pop(backend_idx)
  return replacement
def Modify(self, args, existing):
  """Returns a modified URL map message."""
  replacement = copy.deepcopy(existing)
  service_ref = self.CreateGlobalReference(
      args.default_service, resource_type='backendServices')
  replacement.defaultService = service_ref.SelfLink()
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with args.tags merged into its tags."""
  new_object = copy.deepcopy(existing)
  current = set(new_object.tags.items)
  merged = current | set(args.tags)
  # Do not re-order the items if the object won't change, or the objects
  # will not be considered equal and an unnecessary API call will be made.
  if merged != current:
    new_object.tags.items = sorted(merged)
  return new_object
def YieldFromList(
    service, request, limit=None, batch_size=100,
    method="List", field="items", predicate=None,
    current_token_attribute="pageToken",
    next_token_attribute="nextPageToken",
):
  """Make a series of List requests, keeping track of page tokens.

  Args:
    service: apitools_base.BaseApiService, A service with a .List() method.
    request: protorpc.messages.Message, The request message corresponding to
        the service's .List() method, with all the attributes populated
        except the .pageSize and .pageToken attributes.
    limit: int, The maximum number of records to yield. None if all
        available records should be yielded.
    batch_size: int, The number of items to retrieve per request.
    method: str, The name of the method used to fetch resources.
    field: str, The field in the response that will be a list of items.
    predicate: lambda, A function that returns true for items to be yielded.
    current_token_attribute: str, The name of the attribute in a request
        message holding the page token for the page being requested.
    next_token_attribute: str, The name of the attribute in a response
        message holding the page token for the next page.

  Yields:
    protorpc.message.Message, The resources listed by the service.
  """
  # Copy the request so the caller's message is never mutated.
  request = copy.deepcopy(request)
  request.pageSize = batch_size
  request.pageToken = None
  while limit is None or limit:
    try:
      response = getattr(service, method)(request)
    except apitools_base.HttpError as error:
      raise calliope_exceptions.HttpException(
          "RPC Failed: {0}".format(dataflow_util.GetErrorMessage(error)))
    items = getattr(response, field)
    if predicate:
      # Python 2 filter() returns a list here.
      items = filter(predicate, items)
    for item in items:
      yield item
      if limit is None:
        continue
      # Stop as soon as the requested number of records has been yielded.
      limit -= 1
      if not limit:
        return
    token = getattr(response, next_token_attribute)
    if not token:
      return
    setattr(request, current_token_attribute, token)
def _DictToOrderedDict(obj):
  """Recursively converts a JSON-serializable dict to an OrderedDict."""
  if isinstance(obj, dict):
    # Keys are sorted once; values are converted in place afterwards.
    result = collections.OrderedDict(sorted(obj.items()))
    for key in result:
      result[key] = _DictToOrderedDict(result[key])
    return result
  if isinstance(obj, list):
    return [_DictToOrderedDict(elem) for elem in obj]
  return copy.deepcopy(obj)
def Modify(self, args, existing):
  """Returns a copy of existing with description/bucket flags applied."""
  replacement = copy.deepcopy(existing)

  description = args.description
  if description:
    replacement.description = description
  elif description == "":  # pylint: disable=g-explicit-bool-comparison
    # An explicitly empty --description clears the field.
    replacement.description = None

  if args.gcs_bucket_name:
    replacement.bucketName = args.gcs_bucket_name

  return replacement
def Modify(self, args, existing):
  """Returns a modified URL map message."""
  replacement = copy.deepcopy(existing)
  replacement.hostRules.append(self.messages.HostRule(
      description=args.description,
      hosts=sorted(args.hosts),
      pathMatcher=args.path_matcher_name))
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with a new BGP peer appended."""
  replacement = copy.deepcopy(existing)
  new_peer = self.messages.RouterBgpPeer(
      name=args.peer_name,
      interfaceName=args.interface,
      peerIpAddress=args.peer_ip_address,
      peerAsn=args.peer_asn,
      advertisedRoutePriority=args.advertised_route_priority)
  replacement.bgpPeers.append(new_peer)
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with rebuilt Windows-keys metadata."""
  new_object = copy.deepcopy(existing)
  existing_metadata = getattr(existing, 'metadata', None)
  new_object.metadata = metadata_utils.ConstructMetadataMessage(
      message_classes=self.messages,
      metadata={
          METADATA_KEY: self._UpdateWindowsKeysValue(existing_metadata)},
      existing_metadata=existing_metadata)
  return new_object
def Modify(self, args, existing):
  """Returns a copy of existing with the named BGP peer removed."""
  replacement = copy.deepcopy(existing)
  for peer in replacement.bgpPeers:
    if peer.name == args.peer_name:
      replacement.bgpPeers.remove(peer)
      return replacement
  raise PeerNotFoundError(args.peer_name)
def Modify(self, args, existing):
  """Returns a copy of existing with the named interface removed."""
  replacement = copy.deepcopy(existing)
  for iface in replacement.interfaces:
    if iface.name == args.interface_name:
      replacement.interfaces.remove(iface)
      return replacement
  raise InterfaceNotFoundError(args.interface_name)
def Modify(self, args, existing):
  """Returns existing minus the requested metadata keys, or None if no-op."""
  new_object = copy.deepcopy(existing)
  existing_metadata = getattr(existing, self.metadata_field, None)
  pruned = metadata_utils.RemoveEntries(
      self.messages,
      existing_metadata=existing_metadata,
      keys=args.keys,
      remove_all=args.all)
  setattr(new_object, self.metadata_field, pruned)
  # Returning None tells the caller that no update request is needed.
  if metadata_utils.MetadataEqual(
      existing_metadata, getattr(new_object, self.metadata_field, None)):
    return None
  return new_object
def Modify(self, args, existing):
  """Returns existing with the metadata flags applied, or None if no-op."""
  new_object = copy.deepcopy(existing)
  existing_metadata = getattr(existing, self.metadata_field, None)
  constructed = metadata_utils.ConstructMetadataMessage(
      self.messages,
      metadata=args.metadata,
      metadata_from_file=args.metadata_from_file,
      existing_metadata=existing_metadata)
  setattr(new_object, self.metadata_field, constructed)
  # Returning None tells the caller that no update request is needed.
  if metadata_utils.MetadataEqual(
      existing_metadata, getattr(new_object, self.metadata_field, None)):
    return None
  return new_object
def Modify(self, args, existing):
  """Returns a modified URL map message.

  Removes the host rule whose hosts contain args.host. If that leaves the
  rule's path matcher unreferenced, the matcher is deleted when
  --delete-orphaned-path-matcher is given; otherwise the command fails.
  """
  replacement = copy.deepcopy(existing)

  path_matcher_to_remove = None
  new_host_rules = []
  for host_rule in existing.hostRules:
    if args.host in host_rule.hosts:
      path_matcher_to_remove = host_rule.pathMatcher
    else:
      new_host_rules.append(host_rule)

  if not path_matcher_to_remove:
    raise exceptions.ToolException(
        'No host rule contains the host [{0}].'.format(args.host))

  replacement.hostRules = new_host_rules

  path_matcher_is_used_by_other_rules = False
  for host_rule in replacement.hostRules:
    if host_rule.pathMatcher == path_matcher_to_remove:
      path_matcher_is_used_by_other_rules = True
      break

  if not path_matcher_is_used_by_other_rules:
    if args.delete_orphaned_path_matcher:
      replacement.pathMatchers = [
          path_matcher for path_matcher in existing.pathMatchers
          if path_matcher.name != path_matcher_to_remove
      ]
    else:
      # BUG FIX: this message previously interpolated host_rule.pathMatcher,
      # a stale loop variable that can name the wrong matcher; use the
      # matcher actually being orphaned.
      raise exceptions.ToolException(
          'This operation will orphan the path matcher [{0}]. To '
          'delete the orphan path matcher, rerun this command with '
          '[--delete-orphaned-path-matcher] or use [gcloud compute '
          'url-maps edit] to modify the URL map by hand.'.format(
              path_matcher_to_remove))

  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with a new backend appended.

  Args:
    args: argparse namespace; either the deprecated --group flag or
        --instance-group identifies the backend's instance group.
    existing: the backend service message to copy.

  Returns:
    A deep copy of existing with the new Backend message appended.

  Raises:
    exceptions.ToolException: if the group is already a backend of the
        service.
  """
  replacement = copy.deepcopy(existing)

  if args.group is not None:
    log.warn('The --group flag is deprecated and will be removed. '
             'Please use --instance-group instead.')
    group_ref = self.CreateZonalReference(args.group, args.zone,
                                          resource_type='zoneViews')
  else:
    group_ref = self.CreateZonalReference(
        args.instance_group, args.zone, resource_type='instanceGroups')
  group_uri = group_ref.SelfLink()

  for backend in existing.backends:
    if group_uri == backend.group:
      # BUG FIX: this message previously interpolated args.group, which is
      # None when --instance-group was used; report the resolved group
      # name instead.
      raise exceptions.ToolException(
          'Backend [{0}] in zone [{1}] already exists in backend service '
          '[{2}].'.format(group_ref.Name(), args.zone, args.name))

  if args.balancing_mode:
    balancing_mode = self.messages.Backend.BalancingModeValueValuesEnum(
        args.balancing_mode)
  else:
    balancing_mode = None

  backend = self.messages.Backend(
      balancingMode=balancing_mode,
      capacityScaler=args.capacity_scaler,
      description=args.description,
      group=group_uri,
      maxRate=args.max_rate,
      maxRatePerInstance=args.max_rate_per_instance,
      maxUtilization=args.max_utilization)
  replacement.backends.append(backend)
  return replacement
def Modify(self, args, existing):
  """Returns a copy of existing with every supplied flag folded in."""
  updated = copy.deepcopy(existing)

  # Empty --description clears the field; non-empty replaces; absent keeps.
  if args.description is not None:
    updated.description = args.description or None

  health_checks = backend_services_utils.GetHealthChecks(args, self)
  if health_checks:
    updated.healthChecks = health_checks

  # Simple truthy flags map straight onto message fields.
  for flag_value, attr in ((args.timeout, 'timeoutSec'),
                           (args.port, 'port'),
                           (args.port_name, 'portName')):
    if flag_value:
      setattr(updated, attr, flag_value)

  if args.protocol:
    updated.protocol = self.messages.BackendService.ProtocolValueValuesEnum(
        args.protocol)

  # These flags must distinguish "unset" (None) from falsy values (False/0).
  if args.enable_cdn is not None:
    updated.enableCDN = args.enable_cdn
  if args.session_affinity is not None:
    updated.sessionAffinity = (
        self.messages.BackendService.SessionAffinityValueValuesEnum(
            args.session_affinity))
  if args.affinity_cookie_ttl is not None:
    updated.affinityCookieTtlSec = args.affinity_cookie_ttl

  return updated
def _Clone(self):
  """Returns a new Registry built from copies of this one's tables.

  registered_apis is deep-copied; the parser dicts are copied with
  _CopyNestedDictSpine (presumably a structure-only copy — the parser
  objects themselves appear to be shared; confirm against
  _CopyNestedDictSpine).
  """
  return Registry(
      parsers_by_collection=_CopyNestedDictSpine(self.parsers_by_collection),
      parsers_by_url=_CopyNestedDictSpine(self.parsers_by_url),
      default_param_funcs=_CopyNestedDictSpine(self.default_param_funcs),
      registered_apis=copy.deepcopy(self.registered_apis))
def Get(self, obj):
  """Returns the property in obj or None if the property does not exist."""
  # Deep-copy so callers can't mutate the underlying resource via the value.
  value = _GetProperty(obj, self._compiled_property)
  return copy.deepcopy(value)
def Modify(self, args, existing):
  """Returns a deep copy of existing with the update-backend flags applied.

  Locates the backend that references the requested instance group, then
  updates its description and balancing-mode control parameters in place.
  """
  replacement = copy.deepcopy(existing)

  if args.group is not None:
    log.warn('The --group flag is deprecated and will be removed. '
             'Please use --instance-group instead.')
    group_ref = self.CreateZonalReference(args.group, args.zone,
                                          resource_type='zoneViews')
  else:
    group_ref = self.CreateGroupReference(args)

  backend_to_update = None
  for backend in replacement.backends:
    if group_ref.SelfLink() == backend.group:
      backend_to_update = backend

  if not backend_to_update:
    # Work out whether the reference is zonal or regional so the error
    # names the right scope; if both attributes exist, region wins.
    scope_type = None
    scope_name = None
    if hasattr(group_ref, 'zone'):
      scope_type = 'zone'
      scope_name = group_ref.zone
    if hasattr(group_ref, 'region'):
      scope_type = 'region'
      scope_name = group_ref.region
    raise exceptions.ToolException(
        'No backend with name [{0}] in {1} [{2}] is part of the backend '
        'service [{3}].'.format(group_ref.Name(), scope_type, scope_name,
                                self.ref.Name()))

  # Empty --description clears the field; non-empty replaces it; absent
  # (None) leaves it alone.
  if args.description:
    backend_to_update.description = args.description
  elif args.description is not None:
    backend_to_update.description = None

  if args.balancing_mode:
    backend_to_update.balancingMode = (
        self.messages.Backend.BalancingModeValueValuesEnum(
            args.balancing_mode))

    # If the balancing mode is being changed to RATE, we must clear
    # the max utilization field, otherwise the server will reject
    # the request.
    if (backend_to_update.balancingMode ==
        self.messages.Backend.BalancingModeValueValuesEnum.RATE):
      backend_to_update.maxUtilization = None

  # Now, we set the parameters that control load balancing. The user
  # can still provide a control parameter that is incompatible with
  # the balancing mode; like the add-backend subcommand, we leave it
  # to the server to perform validation on this.
  #
  # TODO(user): In the future, we probably should do this
  # validation client-side, so we can produce better error messages.
  if args.max_utilization is not None:
    backend_to_update.maxUtilization = args.max_utilization

  # Setting one of the two rate controls clears the other.
  if args.max_rate is not None:
    backend_to_update.maxRate = args.max_rate
    backend_to_update.maxRatePerInstance = None

  if args.max_rate_per_instance is not None:
    backend_to_update.maxRate = None
    backend_to_update.maxRatePerInstance = args.max_rate_per_instance

  if args.capacity_scaler is not None:
    backend_to_update.capacityScaler = args.capacity_scaler

  return replacement
def Modify(self, args, existing):
  """Returns a modified URL map message.

  Adds a new path matcher and wires it up to a host rule: either a brand
  new rule built from --new-hosts (default ['*']) or an existing rule
  selected by --existing-host.
  """
  replacement = copy.deepcopy(existing)

  if not args.new_hosts and not args.existing_host:
    new_hosts = ['*']
  else:
    new_hosts = args.new_hosts

  # If --new-hosts is given, we check to make sure none of those
  # hosts already exist and once the check succeeds, we create the
  # new host rule.
  if new_hosts:
    new_hosts = set(new_hosts)
    for host_rule in existing.hostRules:
      for host in host_rule.hosts:
        if host in new_hosts:
          raise exceptions.ToolException(
              'Cannot create a new host rule with host [{0}] because the '
              'host is already part of a host rule that references the path '
              'matcher [{1}].'.format(host, host_rule.pathMatcher))
    replacement.hostRules.append(self.messages.HostRule(
        hosts=sorted(new_hosts),
        pathMatcher=args.path_matcher_name))

  # If --existing-host is given, we check to make sure that the
  # corresponding host rule will not render a patch matcher
  # orphan. If the check succeeds, we change the path matcher of the
  # host rule. If the check fails, we remove the path matcher if
  # --delete-orphaned-path-matcher is given otherwise we fail.
  else:
    target_host_rule = None
    for host_rule in existing.hostRules:
      for host in host_rule.hosts:
        if host == args.existing_host:
          target_host_rule = host_rule
          break
      if target_host_rule:
        break

    if not target_host_rule:
      raise exceptions.ToolException(
          'No host rule with host [{0}] exists. Check your spelling or '
          'use [--new-hosts] to create a new host rule.'.format(
              args.existing_host))

    path_matcher_orphaned = True
    for host_rule in replacement.hostRules:
      if host_rule == target_host_rule:
        # Repoint the chosen rule at the new path matcher.
        host_rule.pathMatcher = args.path_matcher_name
        continue
      if host_rule.pathMatcher == target_host_rule.pathMatcher:
        path_matcher_orphaned = False
        break

    if path_matcher_orphaned:
      # A path matcher will be orphaned, so now we determine whether
      # we should delete the path matcher or report an error.
      if args.delete_orphaned_path_matcher:
        replacement.pathMatchers = [
            path_matcher for path_matcher in existing.pathMatchers
            if path_matcher.name != target_host_rule.pathMatcher]
      else:
        # NOTE(review): this message interpolates host_rule.pathMatcher,
        # the leftover value of the loop variable above, which may not be
        # the matcher actually being orphaned
        # (target_host_rule.pathMatcher) — confirm and fix separately.
        raise exceptions.ToolException(
            'This operation will orphan the path matcher [{0}]. To '
            'delete the orphan path matcher, rerun this command with '
            '[--delete-orphaned-path-matcher] or use [gcloud compute '
            'url-maps edit] to modify the URL map by hand.'.format(
                host_rule.pathMatcher))

  # Creates PathRule objects from --path-rules.
  service_map = collections.defaultdict(set)
  for path, service in args.path_rules.iteritems():
    service_map[service].add(path)

  path_rules = []
  for service, paths in sorted(service_map.iteritems()):
    path_rules.append(self.messages.PathRule(
        paths=sorted(paths),
        service=self.CreateGlobalReference(
            service, resource_type='backendServices').SelfLink()))

  new_path_matcher = self.messages.PathMatcher(
      defaultService=self.CreateGlobalReference(
          args.default_service,
          resource_type='backendServices').SelfLink(),
      description=args.description,
      name=args.path_matcher_name,
      pathRules=path_rules)

  replacement.pathMatchers.append(new_path_matcher)
  return replacement
def _List(requests, http, batch_url, errors):
  """Makes a series of list and/or aggregatedList batch requests.

  Args:
    requests: A list of requests to make. Each element must be a 3-element
      tuple where the first element is the service, the second element is
      the method ('List' or 'AggregatedList'), and the third element is a
      protocol buffer representing either a list or aggregatedList request.
    http: An httplib2.Http-like object.
    batch_url: The handler for making batch requests.
    errors: A list for capturing errors. If any response contains an error,
      it is added to this list.

  Yields:
    Resources encapsulated as protocol buffers as they are received from the
    server.
  """
  while requests:
    responses, request_errors = batch_helper.MakeRequests(
        requests=requests, http=http, batch_url=batch_url)
    errors.extend(request_errors)

    new_requests = []

    for i, response in enumerate(responses):
      if not response:
        # A failed request has no response; its error was already captured
        # in request_errors above.
        continue

      service, method, request_protobuf = requests[i]

      # If the request is a list call, then yield the items directly.
      if method == 'List':
        for item in response.items:
          yield item

      # If the request is an aggregatedList call, then do all the
      # magic necessary to get the actual resources because the
      # aggregatedList responses are very complicated data
      # structures...
      else:
        items_field_name = service.GetMethodConfig(
            'AggregatedList').relative_path.split('/')[-1]
        for scope_result in response.items.additionalProperties:
          # If the given scope is unreachable, record the warning
          # message in the errors list.
          warning = scope_result.value.warning
          if (warning and
              warning.code == warning.CodeValueValuesEnum.UNREACHABLE):
            errors.append((None, warning.message))

          items = getattr(scope_result.value, items_field_name)
          for item in items:
            yield item

      next_page_token = response.nextPageToken
      if next_page_token:
        # Queue a follow-up request for the next page of this call.
        new_request_protobuf = copy.deepcopy(request_protobuf)
        new_request_protobuf.pageToken = next_page_token
        new_requests.append((service, method, new_request_protobuf))

    requests = new_requests
def _AddKey(self, key, attribute_add):
  """Propagates default attribute values and adds key to the projection.

  Args:
    key: The parsed key to add (a list of name/index components; [] means
      the entire object).
    attribute_add: Parsed _Attribute to add.
  """
  projection = self._root

  # Add or update the inner nodes.
  for name in key[:-1]:
    tree = projection.tree
    if name in tree:
      attribute = tree[name].attribute
      if attribute.flag != self._projection.PROJECT:
        # An already-projected node keeps PROJECT; everything else on the
        # path becomes a non-terminal INNER node.
        attribute.flag = self._projection.INNER
    else:
      tree[name] = self._Tree(self._Attribute(
          self._projection.INNER))
    projection = tree[name]

  # Add or update the terminal node.
  tree = projection.tree
  # self.key == [] => . or a function on the entire object.
  name = key[-1] if key else ''
  name_in_tree = name in tree
  if name_in_tree:
    # Already added.
    if (not self.__key_attributes_only and any(
        [col for col in self._projection.Columns() if col.key == key])):
      # A duplicate column. A projection can only have one attribute object
      # per key. The first <key, attribute> pair added to the current set of
      # columns remains in the projection. Projection columns may have
      # duplicate keys (e.g., table columns with the same key but different
      # transforms). The attribute copy, with attribute_add merged in, is
      # added to the projection columns but not the projection tree.
      attribute = copy.copy(tree[name].attribute)
    else:
      attribute = tree[name].attribute
    attribute.hidden = False
  elif isinstance(name, (int, long)) and None in tree:
    # New projection for explicit name using slice defaults.
    tree[name] = copy.deepcopy(tree[None])
    attribute = tree[name].attribute
  else:
    # New projection. NOTE: in this branch `attribute` aliases
    # `attribute_add`, so the propagation block below mostly copies values
    # onto the same object; only the elif default-filling clauses
    # (label/optional/reverse) have an effect.
    attribute = attribute_add
    if self.__key_attributes_only and attribute.order:
      attribute.hidden = True
    if key or attribute.transform:
      tree[name] = self._Tree(attribute)

  # Propagate non-default values from attribute_add to attribute.
  if attribute_add.order is not None:
    attribute.order = attribute_add.order
  if attribute_add.label is not None:
    attribute.label = attribute_add.label
  elif attribute.label is None:
    # Default the display label from the key name.
    attribute.label = self._AngrySnakeCase(key)
  if attribute_add.align != resource_projection_spec.ALIGN_DEFAULT:
    attribute.align = attribute_add.align
  if attribute_add.optional is not None:
    attribute.optional = attribute_add.optional
  elif attribute.optional is None:
    attribute.optional = False
  if attribute_add.reverse is not None:
    attribute.reverse = attribute_add.reverse
  elif attribute.reverse is None:
    attribute.reverse = False
  if attribute_add.transform:
    attribute.transform = attribute_add.transform
  if attribute_add.subformat:
    attribute.subformat = attribute_add.subformat
  self._projection.AddAlias(attribute.label, key)

  if not self.__key_attributes_only or attribute.hidden:
    # This key is in the projection.
    attribute.flag = self._projection.PROJECT
    self._projection.AddKey(key, attribute)
  elif not name_in_tree:
    # This is a new attributes only key.
    attribute.flag = self._projection.DEFAULT
def Begin(self):
  """Begins the transaction, returning a list of files that need uploading.

  All calls to AddFile must be made before calling Begin().

  Returns:
    A list of pathnames for files that should be uploaded using UploadFile()
    before Commit() can be called.
  """
  assert not self.in_transaction, 'Already in a transaction.'

  # Make a one-off copy of the given config, and send this tweaked config to
  # the "create" request without modifying the actual config belonging to this
  # AppVersionUploader object.
  config_copy = copy.deepcopy(self.module_yaml)
  for url in config_copy.handlers:
    handler_type = url.GetHandlerType()
    if url.application_readable:
      # Forward slashes are the only valid path separator regardless of
      # platform.
      if handler_type == 'static_dir':
        url.static_dir = '%s/%s' % (STATIC_FILE_PREFIX, url.static_dir)
      elif handler_type == 'static_files':
        url.static_files = '%s/%s' % (STATIC_FILE_PREFIX, url.static_files)
        url.upload = '%s/%s' % (STATIC_FILE_PREFIX, url.upload)

  response = self.logging_context.Send('/api/appversion/create',
                                       payload=config_copy.ToYAML())
  # The create response may carry server-side warnings; surface them to the
  # user but keep going.
  result = self._ValidateBeginYaml(response)
  if result:
    warnings = result.get('warnings')
    for warning in warnings:
      log.warn(warning)

  self.in_transaction = True

  # Partition the registered files into clone candidates: static blobs,
  # application files, and error blobs (a file can be both a blob and an
  # error blob).
  files_to_clone = []
  blobs_to_clone = []
  errorblobs = {}
  for path, content_hash in self.files.iteritems():
    file_classification = FileClassification(self.module_yaml, path)

    if file_classification.IsStaticFile():
      upload_path = path
      if file_classification.IsApplicationFile():
        # Files that are both static and application data are uploaded to a
        # prefixed static path so they don't collide with the code copy.
        upload_path = '%s/%s' % (STATIC_FILE_PREFIX, path)
      blobs_to_clone.append((path, upload_path, content_hash,
                             file_classification.StaticMimeType()))

    # Additionally check if this is an error blob. A file may be both a normal
    # blob and an error blob.
    if file_classification.IsErrorFile():
      # TODO(jonmac): Clone error blobs instead of re-uploading them each and
      # every time. Punting for now because the savings here are incredibly
      # small but the code complexity is high.
      errorblobs[path] = content_hash

    if file_classification.IsApplicationFile():
      files_to_clone.append((path, path, content_hash))

  # Paths the server reports it cannot clone; these must be uploaded.
  files_to_upload = {}

  def CloneFiles(url, files, file_type):
    """Sends files to the given url.

    Asks the server to clone each (real_path, upload_path, ...) entry and
    collects, into the enclosing files_to_upload dict, the entries the
    server echoes back (presumably the ones it could not clone — verify
    against the clone API).

    Args:
      url: the server URL to use.
      files: a list of files
      file_type: the type of the files
    """
    if not files:
      return

    log.debug('Cloning %d %s file%s.' %
              (len(files), file_type, len(files) != 1 and 's' or ''))
    # Do only N files at a time to avoid huge requests and responses.
    max_files = self.resource_limits['max_files_to_clone']
    for i in xrange(0, len(files), max_files):
      if i > 0 and i % max_files == 0:
        log.debug('Cloned %d files.' % i)

      chunk = files[i:min(len(files), i + max_files)]
      result = self.logging_context.Send(url,
                                         payload=BuildClonePostBody(chunk))
      if result:
        to_upload = {}
        for f in result.split(LIST_DELIMITER):
          # Map the server-echoed upload path back to its local real path.
          for entry in files:
            real_path, upload_path = entry[:2]
            if f == upload_path:
              to_upload[real_path] = self.files[real_path]
              break
        files_to_upload.update(to_upload)

  CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
  CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')

  log.debug('Files to upload: %s', files_to_upload)

  # Error blobs are never cloned (see TODO above), so they are always
  # uploaded.
  for (path, content_hash) in errorblobs.iteritems():
    files_to_upload[path] = content_hash
  self.files = files_to_upload
  return sorted(files_to_upload.iterkeys())
def _AddKey(self, key, attribute_add):
  """Propagates default attribute values and adds key to the projection.

  Args:
    key: The parsed key to add (a list of name/index components; [] means
      the entire object).
    attribute_add: Parsed _Attribute to add.
  """
  projection = self._root

  # Add or update the inner nodes.
  for name in key[:-1]:
    tree = projection.tree
    if name in tree:
      attribute = tree[name].attribute
      if attribute.flag != self._projection.PROJECT:
        # An already-projected node keeps PROJECT; everything else on the
        # path becomes a non-terminal INNER node.
        attribute.flag = self._projection.INNER
    else:
      tree[name] = self._Tree(self._Attribute(
          self._projection.INNER))
    projection = tree[name]

  # Add or update the terminal node.
  tree = projection.tree
  # self.key == [] => a function on the entire object.
  name = key[-1] if key else ''
  name_in_tree = name in tree
  if name_in_tree:
    # Already added.
    attribute = tree[name].attribute
  elif isinstance(name, (int, long)) and None in tree:
    # New projection for explicit name using slice defaults.
    tree[name] = copy.deepcopy(tree[None])
    attribute = tree[name].attribute
  else:
    # New projection. NOTE: in this branch `attribute` aliases
    # `attribute_add`, so the propagation block below mostly copies values
    # onto the same object; only the elif default-filling clauses
    # (label/reverse) have an effect.
    attribute = attribute_add
    tree[name] = self._Tree(attribute)

  # Propagate non-default values from attribute_add to attribute.
  if attribute_add.order is not None:
    attribute.order = attribute_add.order
  if attribute_add.label is not None:
    attribute.label = attribute_add.label
  elif attribute.label is None:
    # Default the display label from the key name.
    attribute.label = self._AngrySnakeCase(key)
  if attribute_add.align != resource_projection_spec.ALIGN_DEFAULT:
    attribute.align = attribute_add.align
  if attribute_add.reverse is not None:
    attribute.reverse = attribute_add.reverse
  elif attribute.reverse is None:
    attribute.reverse = False
  if attribute_add.transform:
    attribute.transform = attribute_add.transform
  if attribute_add.subformat:
    attribute.subformat = attribute_add.subformat
  self._projection.AddAlias(attribute.label, key)

  if not self.__key_attributes_only:
    # This key is in the projection. Ordinals record the order in which
    # keys were added.
    attribute.flag = self._projection.PROJECT
    self._ordinal += 1
    attribute.ordinal = self._ordinal
    self._projection.AddKey(key, attribute)
  elif not name_in_tree:
    # This is a new attributes only key.
    attribute.flag = self._projection.DEFAULT
def _ModifyBase(self, args, existing): """Modifications to the URL map that are shared between release tracks. Args: args: the argparse arguments that this command was invoked with. existing: the existing URL map message. Returns: A modified URL map message. """ replacement = copy.deepcopy(existing) if not args.new_hosts and not args.existing_host: new_hosts = ['*'] else: new_hosts = args.new_hosts # If --new-hosts is given, we check to make sure none of those # hosts already exist and once the check succeeds, we create the # new host rule. if new_hosts: new_hosts = set(new_hosts) for host_rule in existing.hostRules: for host in host_rule.hosts: if host in new_hosts: raise exceptions.ToolException( 'Cannot create a new host rule with host [{0}] because the ' 'host is already part of a host rule that references the path ' 'matcher [{1}].'.format(host, host_rule.pathMatcher)) replacement.hostRules.append(self.messages.HostRule( hosts=sorted(new_hosts), pathMatcher=args.path_matcher_name)) # If --existing-host is given, we check to make sure that the # corresponding host rule will not render a patch matcher # orphan. If the check succeeds, we change the path matcher of the # host rule. If the check fails, we remove the path matcher if # --delete-orphaned-path-matcher is given otherwise we fail. else: target_host_rule = None for host_rule in existing.hostRules: for host in host_rule.hosts: if host == args.existing_host: target_host_rule = host_rule break if target_host_rule: break if not target_host_rule: raise exceptions.ToolException( 'No host rule with host [{0}] exists. Check your spelling or ' 'use [--new-hosts] to create a new host rule.' 
.format(args.existing_host)) path_matcher_orphaned = True for host_rule in replacement.hostRules: if host_rule == target_host_rule: host_rule.pathMatcher = args.path_matcher_name continue if host_rule.pathMatcher == target_host_rule.pathMatcher: path_matcher_orphaned = False break if path_matcher_orphaned: # A path matcher will be orphaned, so now we determine whether # we should delete the path matcher or report an error. if args.delete_orphaned_path_matcher: replacement.pathMatchers = [ path_matcher for path_matcher in existing.pathMatchers if path_matcher.name != target_host_rule.pathMatcher] else: raise exceptions.ToolException( 'This operation will orphan the path matcher [{0}]. To ' 'delete the orphan path matcher, rerun this command with ' '[--delete-orphaned-path-matcher] or use [gcloud compute ' 'url-maps edit] to modify the URL map by hand.'.format( host_rule.pathMatcher)) return replacement