Example #1
    def Run(self, args):
        if args.stdin:
            if args.urls:
                raise errors.Error(
                    'No URL arguments allowed when reading URLs from stdin.')
            urls = stdin_iterator.StdinIterator()
        else:
            if not args.urls:
                raise errors.Error(
                    'Without the --stdin flag, the rm command requires at least one URL'
                    ' argument.')
            urls = args.urls

        name_expansion_iterator = name_expansion.NameExpansionIterator(
            urls,
            all_versions=args.all_versions or args.recursive,
            include_buckets=args.recursive,
            recursion_requested=args.recursive)

        user_request_args = (user_request_args_factory.
                             get_user_request_args_from_command_args(args))
        task_status_queue = task_graph_executor.multiprocessing_context.Queue()
        task_iterator_factory = (
            delete_task_iterator_factory.DeleteTaskIteratorFactory(
                name_expansion_iterator,
                task_status_queue=task_status_queue,
                user_request_args=user_request_args))

        log.status.Print('Removing objects:')
        object_exit_code = task_executor.execute_tasks(
            task_iterator_factory.object_iterator(),
            parallelizable=True,
            task_status_queue=task_status_queue,
            progress_manager_args=task_status.ProgressManagerArgs(
                increment_type=task_status.IncrementType.INTEGER,
                manifest_path=None),
            continue_on_error=args.continue_on_error)

        bucket_iterator = plurality_checkable_iterator.PluralityCheckableIterator(
            task_iterator_factory.bucket_iterator())

        # We perform the is_empty check to avoid printing unnecessary status lines.
        if args.recursive and not bucket_iterator.is_empty():
            log.status.Print('Removing buckets:')
            bucket_exit_code = task_executor.execute_tasks(
                bucket_iterator,
                parallelizable=True,
                task_status_queue=task_status_queue,
                progress_manager_args=task_status.ProgressManagerArgs(
                    increment_type=task_status.IncrementType.INTEGER,
                    manifest_path=None),
                continue_on_error=args.continue_on_error)
        else:
            bucket_exit_code = 0
        self.exit_code = max(object_exit_code, bucket_exit_code)
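stdin_iterator.StdinIterator is not defined in this example. Below is a minimal sketch of what such an iterator might look like, assuming it simply yields stripped, non-empty lines from standard input; the SDK's actual implementation may differ.

import sys


class StdinIterator:
    """Yields one URL per non-empty line read from stdin (illustrative only)."""

    def __iter__(self):
        for line in sys.stdin:
            url = line.strip()
            if url:
                yield url


# Usage sketch (assuming this backs the rm command's --stdin flag):
#   echo gs://bucket/object | <command> --stdin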
Example #2
def validate_cmek(raw_key):
    if not raw_key:
        raise errors.Error('Key is empty.')

    if raw_key.startswith('/'):
        raise errors.Error(
            'KMS key should not start with leading slash (/): ' + raw_key)

    if not _CMEK_REGEX.match(raw_key):
        raise errors.Error(
            'Invalid KMS key name: {}.\nKMS keys should follow the format '
            '"projects/<project-id>/locations/<location>/keyRings/<keyring>/'
            'cryptoKeys/<key-name>"'.format(raw_key))
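_CMEK_REGEX is referenced but not shown above. A minimal sketch of a pattern and calls that fit the documented key format, assuming the four-segment resource path from the error message (the SDK's actual pattern may be stricter):

import re

# Hypothetical pattern for:
# projects/<project-id>/locations/<location>/keyRings/<keyring>/cryptoKeys/<key-name>
_CMEK_REGEX = re.compile(
    r'^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$')

valid_key = ('projects/my-project/locations/us-central1/'
             'keyRings/my-keyring/cryptoKeys/my-key')
print(bool(_CMEK_REGEX.match(valid_key)))             # True
print(bool(_CMEK_REGEX.match('/leading/slash/key')))  # False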
Example #3
def _get_hashed_path(tracker_file_name, tracker_file_type,
                     resumable_tracker_directory):
    """Hashes and returns a tracker file path.

  Args:
    tracker_file_name (str): The tracker file name prior to it being hashed.
    tracker_file_type (TrackerFileType): The TrackerFileType of
      tracker_file_name.
    resumable_tracker_directory (str): Path to directory of tracker files.

  Returns:
    Final (hashed) tracker file path.

  Raises:
    Error: Hashed file path is too long.
  """
    hashed_tracker_file_name = _get_hashed_file_name(tracker_file_name)
    tracker_file_name_with_type = '{}_{}'.format(
        tracker_file_type.value.lower(), hashed_tracker_file_name)
    if len(tracker_file_name_with_type) > _MAX_TRACKER_FILE_NAME_LENGTH:
        raise errors.Error(
            'Tracker file name hash is over max character limit of {}: {}'.
            format(_MAX_TRACKER_FILE_NAME_LENGTH, tracker_file_name_with_type))

    tracker_file_path = (resumable_tracker_directory + os.sep +
                         tracker_file_name_with_type)
    return tracker_file_path
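_get_hashed_file_name, used in this example and in Example #5 below, is not included. One plausible sketch, assuming a SHA-1 hex digest is an acceptable stand-in for whatever hash the SDK actually uses:

import hashlib


def _get_hashed_file_name(tracker_file_name):
    # Reduce an arbitrarily long tracker file name to a fixed-length hex
    # digest so the final on-disk path stays within length limits.
    return hashlib.sha1(tracker_file_name.encode('utf-8')).hexdigest()


print(_get_hashed_file_name('gs://bucket/some/very/long/object/name'))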
Example #4
    def upload_object(self,
                      source_stream,
                      destination_resource,
                      progress_callback=None,
                      request_config=None,
                      serialization_data=None,
                      tracker_callback=None,
                      upload_strategy=cloud_api.UploadStrategy.SIMPLE):
        """See CloudApi class for function doc strings."""
        del progress_callback  # Unused.
        if self._upload_http_client is None:
            self._upload_http_client = transports.GetApitoolsTransport()

        validated_request_config = cloud_api.get_provider_request_config(
            request_config, GcsRequestConfig)

        if upload_strategy == cloud_api.UploadStrategy.SIMPLE:
            upload = gcs_upload.SimpleUpload(self, self._upload_http_client,
                                             source_stream,
                                             DEFAULT_CONTENT_TYPE,
                                             destination_resource,
                                             validated_request_config)
        elif upload_strategy == cloud_api.UploadStrategy.RESUMABLE:
            upload = gcs_upload.ResumableUpload(
                self, self._upload_http_client, source_stream,
                DEFAULT_CONTENT_TYPE, destination_resource,
                validated_request_config, serialization_data, tracker_callback)
        else:
            raise command_errors.Error('Invalid upload strategy: {}.'.format(
                upload_strategy.value))

        return gcs_metadata_util.get_object_resource_from_metadata(
            upload.run())
Example #5
def _get_hashed_path(tracker_file_name, tracker_file_type,
                     resumable_tracker_directory, component_number):
    """Hashes and returns a tracker file path.

  Args:
    tracker_file_name (str): The tracker file name prior to it being hashed.
    tracker_file_type (TrackerFileType): The TrackerFileType of
      tracker_file_name.
    resumable_tracker_directory (str): Path to directory of tracker files.
    component_number (int|None): The number of the component being tracked
      for a sliced download or composite upload.

  Returns:
    Final (hashed) tracker file path.

  Raises:
    Error: Hashed file path is too long.
  """
    hashed_tracker_file_name = _get_hashed_file_name(tracker_file_name)
    tracker_file_name_with_type = '{}_{}'.format(
        tracker_file_type.value.lower(), hashed_tracker_file_name)
    if component_number is not None:
        tracker_file_name_with_type += '_{}'.format(component_number)

    if len(tracker_file_name_with_type) > _MAX_TRACKER_FILE_NAME_LENGTH:
        raise errors.Error(
            'Tracker file name hash is over max character limit of {}: {}'.
            format(_MAX_TRACKER_FILE_NAME_LENGTH, tracker_file_name_with_type))

    tracker_file_path = (resumable_tracker_directory + os.sep +
                         tracker_file_name_with_type)
    return tracker_file_path
Example #6
    def upload_object(self,
                      source_stream,
                      destination_resource,
                      request_config,
                      source_resource=None,
                      serialization_data=None,
                      tracker_callback=None,
                      upload_strategy=cloud_api.UploadStrategy.SIMPLE):
        """See CloudApi class for function doc strings."""
        if self._upload_http_client is None:
            self._upload_http_client = transports.GetApitoolsTransport(
                redact_request_body_reason=
                ('Object data is not displayed to keep the log output clean.'
                 ' Set log_http_show_request_body property to True to print the'
                 ' body of this request.'))

        if source_resource:
            source_path = source_resource.storage_url.versionless_url_string
        else:
            source_path = None
        should_gzip_in_flight = gzip_util.should_gzip_in_flight(
            request_config.gzip_settings, source_path)
        if should_gzip_in_flight:
            log.info('Using compressed transport encoding for {}.'.format(
                source_path))
        if upload_strategy == cloud_api.UploadStrategy.SIMPLE:
            upload = gcs_upload.SimpleUpload(self, self._upload_http_client,
                                             source_stream,
                                             destination_resource,
                                             should_gzip_in_flight,
                                             request_config, source_resource)
        elif upload_strategy == cloud_api.UploadStrategy.RESUMABLE:
            upload = gcs_upload.ResumableUpload(
                self, self._upload_http_client, source_stream,
                destination_resource, should_gzip_in_flight, request_config,
                source_resource, serialization_data, tracker_callback)
        else:
            raise command_errors.Error('Invalid upload strategy: {}.'.format(
                upload_strategy.value))

        encryption_key = getattr(request_config.resource_args,
                                 'encryption_key', None)
        try:
            with self._encryption_headers_context(encryption_key):
                metadata = upload.run()
        except (
                apitools_exceptions.StreamExhausted,
                apitools_exceptions.TransferError,
        ) as error:
            raise cloud_errors.ResumableUploadAbortError(
                '{}\n This likely occurred because the file being uploaded changed '
                'size between resumable upload attempts. If this error persists, try '
                'deleting the tracker files present in {}'.format(
                    str(error),
                    properties.VALUES.storage.tracker_files_directory.Get()))

        return gcs_metadata_util.get_object_resource_from_metadata(metadata)
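gzip_util.should_gzip_in_flight is not shown in this example. A hedged sketch of one way such a check could work, assuming the settings boil down to a list of file extensions opted in to in-flight compression (the SDK's real settings object is richer):

def should_gzip_in_flight(gzip_extensions, source_path):
    # Compress in transit only when the source file's extension is opted in.
    if not gzip_extensions or source_path is None:
        return False
    return any(source_path.endswith('.' + ext) for ext in gzip_extensions)


print(should_gzip_in_flight(['js', 'css'], 'site/app.js'))  # True
print(should_gzip_in_flight(['js', 'css'], 'image.png'))    # False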
Example #7
def _process_factory(task_queue, task_output_queue, task_status_queue,
                     thread_count, idle_thread_count, signal_queue,
                     shared_process_context):
    """Create worker processes.

  This factory must run in a separate process to avoid a deadlock issue;
  see go/gcloud-storage-deadlock-issue/. Although this adds one extra
  process, it remains idle once all the child worker processes are created,
  so it does not add a noticeable burden on the system.

  Args:
    task_queue (multiprocessing.Queue): Holds task_graph.TaskWrapper instances.
    task_output_queue (multiprocessing.Queue): Sends information about completed
      tasks back to the main process.
    task_status_queue (multiprocessing.Queue|None): Used by tasks to report
      their progress to a central location.
    thread_count (int): Number of threads the process should spawn.
    idle_thread_count (multiprocessing.Semaphore): Passed on to worker threads.
    signal_queue (multiprocessing.Queue): Queue used by parent process to
      signal when a new child worker process must be created.
    shared_process_context (SharedProcessContext): Holds values from global
      state that need to be replicated in child processes.
  """
    processes = []
    while True:
        # We receive one signal message for each process to be created.
        signal = signal_queue.get()
        if signal == _SHUTDOWN:
            for _ in processes:
                for _ in range(thread_count):
                    task_queue.put(_SHUTDOWN)
            break
        elif signal == _CREATE_WORKER_PROCESS:
            for _ in range(thread_count):
                idle_thread_count.release()

            process = multiprocessing_context.Process(
                target=_process_worker,
                args=(task_queue, task_output_queue, task_status_queue,
                      thread_count, idle_thread_count, shared_process_context))
            processes.append(process)
            log.debug('Adding 1 process with {} threads.'
                      ' Total processes: {}. Total threads: {}.'.format(
                          thread_count, len(processes),
                          len(processes) * thread_count))
            process.start()
        else:
            raise errors.Error('Received invalid signal for worker '
                               'process creation: {}'.format(signal))

    for process in processes:
        process.join()
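For context, a minimal sketch of how a parent process might drive this factory through signal_queue, assuming _CREATE_WORKER_PROCESS and _SHUTDOWN are simple module-level sentinels (their real values are not shown in the example):

import multiprocessing

# Hypothetical sentinel values; the real module defines its own.
_CREATE_WORKER_PROCESS = 'create_worker_process'
_SHUTDOWN = 'shutdown'


def request_worker_process(signal_queue):
    # The factory consumes one signal message per worker process it creates.
    signal_queue.put(_CREATE_WORKER_PROCESS)


def stop_factory(signal_queue):
    # A single _SHUTDOWN makes the factory flood task_queue with shutdown
    # markers (one per worker thread) and then join its child processes.
    signal_queue.put(_SHUTDOWN)


if __name__ == '__main__':
    queue = multiprocessing.Queue()
    request_worker_process(queue)
    stop_factory(queue)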
Example #8
    def upload_object(self,
                      source_stream,
                      destination_resource,
                      request_config,
                      source_resource=None,
                      serialization_data=None,
                      tracker_callback=None,
                      upload_strategy=cloud_api.UploadStrategy.SIMPLE):
        """See super class."""
        del serialization_data, source_resource, tracker_callback  # Unused.

        if upload_strategy != cloud_api.UploadStrategy.SIMPLE:
            raise command_errors.Error('Invalid upload strategy: {}.'.format(
                upload_strategy.value))

        # All fields common to both put_object and upload_fileobj are added
        # to the extra_args dict.
        extra_args = s3_metadata_util.get_object_metadata_dict_from_request_config(
            request_config)

        md5_hash = getattr(request_config.resource_args, 'md5_hash', None)
        if md5_hash:
            # The upload_fileobj method can perform multipart uploads, so it cannot
            # validate with user-provided MD5 hashes. Hence we use the put_object API
            # method if MD5 validation is requested.
            if request_config.resource_args.size > MAX_PUT_OBJECT_SIZE:
                log.debug('The MD5 hash %s will be ignored', md5_hash)
                log.warning(
                    'S3 does not support MD5 validation for the entire object if'
                    ' size > %d bytes. File size: %d', MAX_PUT_OBJECT_SIZE,
                    request_config.resource_args.size)

                # ContentMD5 might get populated for extra_args during request_config
                # translation. Remove it since upload_fileobj
                # does not accept ContentMD5.
                extra_args.pop('ContentMD5', None)
            else:
                if request_config.resource_args.size is not None:
                    extra_args[
                        'ContentLength'] = request_config.resource_args.size
                return self._upload_using_put_object(source_stream,
                                                     destination_resource,
                                                     extra_args)

        # We default to calling the upload_fileobj method provided by boto3 which
        # is a managed-transfer utility that can perform multipart uploads
        # automatically. It can be used for non-seekable source_streams as well.
        return self._upload_using_managed_transfer_utility(
            source_stream, destination_resource, extra_args)
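_upload_using_put_object and _upload_using_managed_transfer_utility are not shown. A rough sketch of the two underlying boto3 calls they presumably wrap, written as standalone helpers that take a boto3 S3 client (names and bodies here are assumptions, not the SDK's code):

import io


def upload_small_object_with_md5(client, bucket, key, data, content_md5):
    # Single-request upload: S3 validates the Content-MD5 header itself,
    # which is why the example prefers put_object when MD5 checking is on.
    return client.put_object(
        Bucket=bucket, Key=key, Body=io.BytesIO(data), ContentMD5=content_md5)


def upload_large_object(client, bucket, key, fileobj, extra_args):
    # Managed transfer: may become a multipart upload under the hood, so a
    # whole-object Content-MD5 cannot be validated here.
    client.upload_fileobj(fileobj, bucket, key, ExtraArgs=extra_args)


# Usage (assumes boto3 is installed and credentials are configured):
#   client = boto3.client('s3')
#   upload_large_object(client, 'my-bucket', 'my-key', open('f.bin', 'rb'), {})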
Example #9
    def upload_object(self,
                      source_stream,
                      destination_resource,
                      progress_callback=None,
                      request_config=None,
                      serialization_data=None,
                      tracker_callback=None,
                      upload_strategy=cloud_api.UploadStrategy.SIMPLE):
        """See super class."""
        del progress_callback, serialization_data, tracker_callback

        if upload_strategy != cloud_api.UploadStrategy.SIMPLE:
            raise command_errors.Error('Invalid upload strategy: {}.'.format(
                upload_strategy.value))

        if request_config is None:
            request_config = cloud_api.RequestConfig()

        # All fields common to both put_object and upload_fileobj are added
        # to the extra_args dict.
        extra_args = {}

        if request_config.predefined_acl_string:
            extra_args['ACL'] = _translate_predefined_acl_string_to_s3(
                request_config.predefined_acl_string)

        if request_config.md5_hash:
            # The upload_fileobj method can perform multipart uploads, so it cannot
            # validate with user-provided MD5 hashes. Hence we use the put_object API
            # method if MD5 validation is requested.
            if request_config.size > MAX_PUT_OBJECT_SIZE:
                raise errors.S3ApiError(
                    'Cannot upload to destination: {url} because MD5 validation can'
                    ' only be performed for file size <= {maxsize} Bytes. Current file'
                    ' size is {filesize} Bytes. You can remove the MD5 validation'
                    ' requirement to complete the upload'.format(
                        url=destination_resource.storage_url.url_string,
                        maxsize=MAX_PUT_OBJECT_SIZE,
                        filesize=request_config.size))
            extra_args['ContentMD5'] = request_config.md5_hash
            return self._upload_using_put_object(source_stream,
                                                 destination_resource,
                                                 extra_args)
        else:
            # We default to calling the upload_fileobj method provided by boto3 which
            # is a managed-transfer utility that can perform multipart uploads
            # automatically. It can be used for non-seekable source_streams as well.
            return self._upload_using_managed_transfer_utility(
                source_stream, destination_resource, extra_args)
Example #10
def _log_or_raise_crc32c_issues(resource):
    """Informs user about non-standard hashing behavior.

  Args:
    resource (resource_reference.ObjectResource): For checking if object has
      known hash to validate against.

  Raises:
    errors.Error: If gcloud storage is set to fail when performance-optimized
      digesters could not be created.
  """
    if crc32c.IS_FAST_GOOGLE_CRC32C_AVAILABLE or not resource.crc32c_hash:
        # If crc32c is available, hashing behavior will be standard.
        # If resource.crc32c_hash is not available, no hash will be verified.
        return

    check_hashes = properties.VALUES.storage.check_hashes.Get()
    if check_hashes == properties.CheckHashes.ALWAYS.value:
        log.warning(_SLOW_HASH_CHECK_WARNING)
    elif check_hashes == properties.CheckHashes.IF_FAST_ELSE_SKIP.value:
        log.warning(_NO_HASH_CHECK_WARNING)
    elif check_hashes == properties.CheckHashes.IF_FAST_ELSE_FAIL.value:
        raise errors.Error(_NO_HASH_CHECK_ERROR)
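The CheckHashes values referenced here (plus NEVER, used in the next example) suggest an enum roughly like the sketch below. The string values are guesses for orientation only; the actual definitions live in the SDK's properties module.

import enum


class CheckHashes(enum.Enum):
    # Member names inferred from the branches above; values are assumed.
    ALWAYS = 'always'
    IF_FAST_ELSE_SKIP = 'if_fast_else_skip'
    IF_FAST_ELSE_FAIL = 'if_fast_else_fail'
    NEVER = 'never'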
Example #11
    def execute(self, task_status_queue=None):
        """Validates and clean ups after sliced download."""
        component_error_occurred = False
        for message in self.received_messages:
            if message.topic is task.Topic.ERROR:
                log.error(message.payload)
                component_error_occurred = True
        if component_error_occurred:
            raise errors.Error(
                'Failed to download one or more components of sliced download.')

        temporary_object_path = (
            self._temporary_destination_resource.storage_url.object_name)
        final_destination_object_path = (
            self._final_destination_resource.storage_url.object_name)
        if (properties.VALUES.storage.check_hashes.Get() !=
                properties.CheckHashes.NEVER.value
                and self._source_resource.crc32c_hash):

            component_payloads = [
                message.payload for message in self.received_messages
                if message.topic == task.Topic.CRC32C
            ]
            if component_payloads:
                # Sort the payload values by component number.
                sorted_component_payloads = sorted(
                    component_payloads, key=lambda d: d['component_number'])

                downloaded_file_checksum = sorted_component_payloads[0][
                    'crc32c_checksum']
                for i in range(1, len(sorted_component_payloads)):
                    payload = sorted_component_payloads[i]
                    downloaded_file_checksum = crc32c.concat_checksums(
                        downloaded_file_checksum,
                        payload['crc32c_checksum'],
                        b_byte_count=payload['length'])

                downloaded_file_hash_object = crc32c.get_crc32c_from_checksum(
                    downloaded_file_checksum)
                downloaded_file_hash_digest = crc32c.get_hash(
                    downloaded_file_hash_object)

                download_util.validate_download_hash_and_delete_corrupt_files(
                    temporary_object_path, self._source_resource.crc32c_hash,
                    downloaded_file_hash_digest)

        download_util.decompress_or_rename_file(
            self._source_resource,
            temporary_object_path,
            final_destination_object_path,
            do_not_decompress_flag=self._do_not_decompress)

        if self._user_request_args and self._user_request_args.system_posix_data:
            posix_util.set_posix_attributes_on_file(
                final_destination_object_path,
                task_util.get_first_matching_message_payload(
                    self.received_messages,
                    task.Topic.API_DOWNLOAD_RESULT).posix_attributes)

        tracker_file_util.delete_download_tracker_files(
            self._temporary_destination_resource.storage_url)

        if self._print_created_message:
            log.status.Print(
                'Created: {}'.format(final_destination_object_path))
        if self._send_manifest_messages:
            # Does not send md5_hash because sliced download uses CRC32C.
            manifest_util.send_success_message(
                task_status_queue, self._source_resource,
                self._final_destination_resource)

        if self._delete_source:
            return task.Output(additional_task_iterators=[[
                delete_object_task.DeleteObjectTask(
                    self._source_resource.storage_url),
            ]],
                               messages=None)
Example #12
    def read(self, size=-1):
        """Reads size bytes from the buffer queue and returns it.

    This method blocks if the buffer_queue is empty.
    If size > the length of data available, all remaining data is returned.

    Args:
      size (int): The number of bytes to be read.

    Returns:
      Bytes of length 'size'. May return bytes of length less than the size
        if there are no more bytes left to be read.

    Raises:
      _AbruptShutdownError: If self._shutdown_event was set.
      storage.errors.Error: If size is not within the allowed range of
        [-1, MAX_ALLOWED_READ_SIZE] OR
        If size is -1 but the object size is greater than MAX_ALLOWED_READ_SIZE.
    """
        if size == 0:
            return b''

        if size > MAX_ALLOWED_READ_SIZE:
            raise errors.Error(
                'Invalid HTTP read size {} during daisy chain operation, expected'
                ' -1 <= size <= {} bytes.'.format(size, MAX_ALLOWED_READ_SIZE))

        if size == -1:
            # This indicates that we have to read the entire object at once.
            if self._end_position <= MAX_ALLOWED_READ_SIZE:
                chunk_size = self._end_position
            else:
                raise errors.Error(
                    'Read with size=-1 is not allowed for object'
                    ' size > {} bytes to prevent reading large objects'
                    ' in-memory.'.format(MAX_ALLOWED_READ_SIZE))
        else:
            chunk_size = size

        result = io.BytesIO()
        bytes_read = 0

        while bytes_read < chunk_size and self._position < self._end_position:
            if not self._unused_data_from_previous_read:
                with self._buffer_condition:
                    while (not self._buffer_queue
                           and not self._shutdown_event.is_set()):
                        self._buffer_condition.wait()

                    # The shutdown_event needs to be checked before the data is fetched
                    # from the buffer.
                    if self._shutdown_event.is_set():
                        raise _AbruptShutdownError()

                    data = self._buffer_queue.popleft()
                    self._buffer_condition.notify_all()
            else:
                # Data is already present from previous read.
                if self._shutdown_event.is_set():
                    raise _AbruptShutdownError()
                data = self._unused_data_from_previous_read

            if bytes_read + len(data) > chunk_size:
                self._unused_data_from_previous_read = data[chunk_size -
                                                            bytes_read:]
                data_to_return = data[:chunk_size - bytes_read]
            else:
                self._unused_data_from_previous_read = b''
                data_to_return = data
            result.write(data_to_return)
            bytes_read += len(data_to_return)
            self._position += len(data_to_return)

        return result.getvalue()
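The carry-over logic at the end of read() can be shown in isolation. A small, self-contained sketch of the same slicing arithmetic, using plain byte strings instead of the buffer queue:

def split_chunk(data, bytes_read, chunk_size):
    # Mirrors the branch above: return the portion that satisfies the
    # current read and the remainder to keep for the next read() call.
    if bytes_read + len(data) > chunk_size:
        unused_data = data[chunk_size - bytes_read:]
        data_to_return = data[:chunk_size - bytes_read]
    else:
        unused_data = b''
        data_to_return = data
    return data_to_return, unused_data


# A read of 5 bytes that has already consumed 3 takes only 2 more bytes.
print(split_chunk(b'abcdef', bytes_read=3, chunk_size=5))  # (b'ab', b'cdef')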
Example #13
    def execute(self, task_status_queue=None):
        uploaded_components = [
            message.payload for message in self.received_messages
            if message.topic == task.Topic.UPLOADED_COMPONENT
        ]

        if len(uploaded_components) != self._expected_component_count:
            raise errors.Error(
                'Temporary components were not uploaded correctly.'
                ' Please retry this upload.')

        uploaded_objects = [
            component.object_resource for component in sorted(
                uploaded_components,
                key=lambda component: component.component_number)
        ]

        compose_task = compose_objects_task.ComposeObjectsTask(
            uploaded_objects,
            self._destination_resource,
            original_source_resource=self._source_resource,
            user_request_args=self._user_request_args)
        compose_task_output = compose_task.execute(
            task_status_queue=task_status_queue)

        result_resource = task_util.get_first_matching_message_payload(
            compose_task_output.messages, task.Topic.CREATED_RESOURCE)
        if result_resource:
            if self._print_created_message:
                log.status.Print('Created: {}'.format(
                    result_resource.storage_url))
            if self._send_manifest_messages:
                manifest_util.send_success_message(
                    task_status_queue,
                    self._source_resource,
                    self._destination_resource,
                    md5_hash=result_resource.md5_hash)

        # After a successful compose call, we consider the upload complete and can
        # delete tracker files.
        tracker_file_path = tracker_file_util.get_tracker_file_path(
            self._destination_resource.storage_url,
            tracker_file_util.TrackerFileType.PARALLEL_UPLOAD,
            source_url=self._source_resource)
        tracker_file_util.delete_tracker_file(tracker_file_path)

        if gzip_util.should_gzip_locally(
                getattr(self._user_request_args, 'gzip_settings', None),
                self._source_path) and self._source_path.endswith(
                    storage_url.TEMPORARY_FILE_SUFFIX):
            # Delete temporary gzipped version of source file.
            os.remove(self._source_path)
        if self._delete_source:
            # Delete original source file.
            os.remove(self._source_resource.storage_url.object_name)

        return task.Output(additional_task_iterators=[[
            delete_temporary_components_task.DeleteTemporaryComponentsTask(
                self._source_resource,
                self._destination_resource,
                self._random_prefix,
            )
        ]],
                           messages=None)
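task_util.get_first_matching_message_payload appears in this example and in Example #11. A minimal sketch of what such a helper might do, assuming messages are simple objects with topic and payload attributes (the message shape here is hypothetical):

import collections

# Hypothetical message shape for illustration.
Message = collections.namedtuple('Message', ['topic', 'payload'])


def get_first_matching_message_payload(messages, topic):
    # Return the payload of the first message on the given topic, or None.
    for message in messages:
        if message.topic == topic:
            return message.payload
    return None


print(get_first_matching_message_payload(
    [Message('crc32c', {'component_number': 0})], 'crc32c'))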
Example #14
def _get_request_config_resource_args(url,
                                      content_type=None,
                                      decryption_key_hash=None,
                                      error_on_missing_key=True,
                                      md5_hash=None,
                                      size=None,
                                      user_request_args=None):
    """Generates metadata for API calls to storage buckets and objects."""
    if not isinstance(url, storage_url.CloudUrl):
        return None
    user_resource_args = getattr(user_request_args, 'resource_args', None)
    new_resource_args = None

    if url.is_bucket():
        if url.scheme in storage_url.VALID_CLOUD_SCHEMES:
            if url.scheme == storage_url.ProviderPrefix.GCS:
                new_resource_args = _GcsBucketConfig()
                if user_resource_args:
                    new_resource_args.default_encryption_key = (
                        user_resource_args.default_encryption_key)
                    new_resource_args.default_event_based_hold = (
                        user_resource_args.default_event_based_hold)
                    new_resource_args.default_storage_class = (
                        user_resource_args.default_storage_class)
                    new_resource_args.retention_period = (
                        user_resource_args.retention_period)
                    new_resource_args.uniform_bucket_level_access = (
                        user_resource_args.uniform_bucket_level_access)

            elif url.scheme == storage_url.ProviderPrefix.S3:
                new_resource_args = _S3BucketConfig()
                _check_for_unsupported_s3_flags(user_request_args)

        else:
            new_resource_args = _BucketConfig()

        new_resource_args.location = getattr(user_resource_args, 'location',
                                             None)
        new_resource_args.cors_file_path = getattr(user_resource_args,
                                                   'cors_file_path', None)
        new_resource_args.labels_file_path = getattr(user_resource_args,
                                                     'labels_file_path', None)
        new_resource_args.labels_to_append = getattr(user_resource_args,
                                                     'labels_to_append', None)
        new_resource_args.labels_to_remove = getattr(user_resource_args,
                                                     'labels_to_remove', None)
        new_resource_args.lifecycle_file_path = getattr(
            user_resource_args, 'lifecycle_file_path', None)
        new_resource_args.log_bucket = getattr(user_resource_args,
                                               'log_bucket', None)
        new_resource_args.log_object_prefix = getattr(user_resource_args,
                                                      'log_object_prefix',
                                                      None)
        new_resource_args.requester_pays = getattr(user_resource_args,
                                                   'requester_pays', None)
        new_resource_args.versioning = getattr(user_resource_args,
                                               'versioning', None)
        new_resource_args.web_error_page = getattr(user_resource_args,
                                                   'web_error_page', None)
        new_resource_args.web_main_page_suffix = getattr(
            user_resource_args, 'web_main_page_suffix', None)

    elif url.is_object():
        if url.scheme == storage_url.ProviderPrefix.GCS:
            new_resource_args = _GcsObjectConfig()
            if user_resource_args:
                new_resource_args.custom_time = user_resource_args.custom_time

        elif url.scheme == storage_url.ProviderPrefix.S3:
            new_resource_args = _S3ObjectConfig()
            _check_for_unsupported_s3_flags(user_request_args)

        else:
            new_resource_args = _ObjectConfig()

        new_resource_args.content_type = content_type
        new_resource_args.md5_hash = md5_hash
        new_resource_args.size = size

        new_resource_args.encryption_key = encryption_util.get_encryption_key()
        if decryption_key_hash:
            new_resource_args.decryption_key = encryption_util.get_decryption_key(
                decryption_key_hash)
            if not new_resource_args.decryption_key and error_on_missing_key:
                raise errors.Error(
                    'Missing decryption key with SHA256 hash {}. No decryption key '
                    'matches object {}.'.format(decryption_key_hash, url))

        if user_resource_args:
            # User args should override existing settings.
            if user_resource_args.content_type is not None:
                if user_resource_args.content_type:
                    new_resource_args.content_type = user_resource_args.content_type
                else:  # Empty string or other falsey value but not completely unset.
                    new_resource_args.content_type = DEFAULT_CONTENT_TYPE

            if user_resource_args.md5_hash is not None:
                new_resource_args.md5_hash = user_resource_args.md5_hash

            new_resource_args.cache_control = user_resource_args.cache_control
            new_resource_args.content_disposition = user_resource_args.content_disposition
            new_resource_args.content_encoding = user_resource_args.content_encoding
            new_resource_args.content_language = user_resource_args.content_language
            new_resource_args.custom_metadata = user_resource_args.custom_metadata
            new_resource_args.preserve_acl = user_resource_args.preserve_acl

            if user_resource_args.storage_class:
                # Currently, all providers require all caps storage classes.
                new_resource_args.storage_class = (
                    user_resource_args.storage_class.upper())

    return new_resource_args
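The content_type override near the end distinguishes "flag not passed" from "flag explicitly cleared". A standalone sketch of that three-way logic, with DEFAULT_CONTENT_TYPE assumed to be a generic fallback value (the SDK defines its own constant):

DEFAULT_CONTENT_TYPE = 'application/octet-stream'  # Assumed fallback value.


def resolve_content_type(detected_content_type, user_content_type):
    # None  -> the user did not pass a flag; keep the detected value.
    # ''    -> the user explicitly cleared the value; use the default.
    # other -> the user-provided value wins.
    if user_content_type is None:
        return detected_content_type
    if user_content_type:
        return user_content_type
    return DEFAULT_CONTENT_TYPE


print(resolve_content_type('text/plain', None))         # text/plain
print(resolve_content_type('text/plain', 'image/png'))  # image/png
print(resolve_content_type('text/plain', ''))           # application/octet-stream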