def Run(self, args):
  """Lists the resources named by the parsed command arguments."""
  encryption_util.initialize_key_store(args)

  if args.path:
    # Convert every path first, then validate; only cloud URLs are listable.
    storage_urls = [
        storage_url.storage_url_from_string(path) for path in args.path
    ]
    for url in storage_urls:
      if not isinstance(url, storage_url.CloudUrl):
        raise errors.InvalidUrlError('Ls only works for cloud URLs.'
                                     ' Error for: {}'.format(url.url_string))
  else:
    # No paths given: list the default provider's top level.
    storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]

  # First truthy display flag wins; SHORT is the fallback. Order matters and
  # mirrors the flags' precedence: --full, --json, --long, default.
  detail_by_flag = (
      (args.full, cloud_list_task.DisplayDetail.FULL),
      (args.json, cloud_list_task.DisplayDetail.JSON),
      (args.long, cloud_list_task.DisplayDetail.LONG),
  )
  display_detail = next(
      (detail for flag, detail in detail_by_flag if flag),
      cloud_list_task.DisplayDetail.SHORT)

  tasks = [
      cloud_list_task.CloudListTask(
          url,
          all_versions=args.all_versions,
          buckets_flag=args.buckets,
          display_detail=display_detail,
          include_etag=args.etag,
          readable_sizes=args.readable_sizes,
          recursion_flag=args.recursive) for url in storage_urls
  ]
  # Listing output must stay ordered, so run sequentially.
  task_executor.execute_tasks(tasks, parallelizable=False)
def Run(self, args):
  """Deletes the objects (and, with --recursive, buckets) the args name.

  Args:
    args: The parsed command-line argument namespace.
  """
  # URLs come from stdin (--stdin) or from positional arguments, never both.
  if args.stdin:
    if args.urls:
      raise errors.Error(
          'No URL arguments allowed when reading URLs from stdin.')
    urls = stdin_iterator.StdinIterator()
  else:
    if not args.urls:
      raise errors.Error(
          'Without the --stdin flag, the rm command requires at least one URL'
          ' argument.')
    urls = args.urls

  name_expansion_iterator = name_expansion.NameExpansionIterator(
      urls,
      # Recursive delete must remove every version so buckets end up empty.
      all_versions=args.all_versions or args.recursive,
      include_buckets=args.recursive,
      recursion_requested=args.recursive)

  user_request_args = (user_request_args_factory.
                       get_user_request_args_from_command_args(args))
  task_status_queue = task_graph_executor.multiprocessing_context.Queue()
  task_iterator_factory = (
      delete_task_iterator_factory.DeleteTaskIteratorFactory(
          name_expansion_iterator,
          task_status_queue=task_status_queue,
          user_request_args=user_request_args))

  # Phase 1: delete objects. Buckets can only be deleted once emptied.
  log.status.Print('Removing objects:')
  object_exit_code = task_executor.execute_tasks(
      task_iterator_factory.object_iterator(),
      parallelizable=True,
      task_status_queue=task_status_queue,
      progress_manager_args=task_status.ProgressManagerArgs(
          increment_type=task_status.IncrementType.INTEGER,
          manifest_path=None),
      continue_on_error=args.continue_on_error)

  # Phase 2: delete any buckets matched by a recursive request.
  bucket_iterator = plurality_checkable_iterator.PluralityCheckableIterator(
      task_iterator_factory.bucket_iterator())

  # We perform the is_empty check to avoid printing unnecessary status lines.
  if args.recursive and not bucket_iterator.is_empty():
    log.status.Print('Removing Buckets:')
    bucket_exit_code = task_executor.execute_tasks(
        bucket_iterator,
        parallelizable=True,
        task_status_queue=task_status_queue,
        progress_manager_args=task_status.ProgressManagerArgs(
            increment_type=task_status.IncrementType.INTEGER,
            manifest_path=None),
        continue_on_error=args.continue_on_error)
  else:
    bucket_exit_code = 0

  # Surface the worst result: the command fails if either phase failed.
  self.exit_code = max(object_exit_code, bucket_exit_code)
def Run(self, args):
  """Executes the update tasks built from the parsed arguments in parallel."""
  status_queue = task_graph_executor.multiprocessing_context.Queue()
  progress_args = task_status.ProgressManagerArgs(
      increment_type=task_status.IncrementType.INTEGER, manifest_path=None)
  # Record the executor's aggregate result as the command exit code.
  self.exit_code = task_executor.execute_tasks(
      self.update_task_iterator(args),
      parallelizable=True,
      task_status_queue=status_queue,
      progress_manager_args=progress_args,
      continue_on_error=args.continue_on_error,
  )
def Run(self, args):
  """Deletes the cloud buckets named by the command's URL arguments.

  Args:
    args: The parsed command-line argument namespace.

  Raises:
    errors.InvalidUrlError: If any argument is not a cloud bucket URL.
  """
  # Validate every URL up front so nothing is deleted on partially-bad input.
  for url_string in args.urls:
    if not storage_url.storage_url_from_string(url_string).is_bucket():
      raise errors.InvalidUrlError(
          'buckets delete only accepts cloud bucket URLs. Example:'
          ' "gs://bucket"')

  # Create the status queue from the same multiprocessing context the task
  # executor uses (as sibling commands do). A bare multiprocessing.Queue()
  # may belong to a different start method than the executor's workers.
  task_status_queue = task_graph_executor.multiprocessing_context.Queue()
  bucket_iterator = delete_task_iterator_factory.DeleteTaskIteratorFactory(
      name_expansion.NameExpansionIterator(args.urls, include_buckets=True),
      task_status_queue=task_status_queue).bucket_iterator()
  # Wrap so the executor can cheaply probe whether any tasks exist.
  plurality_checkable_bucket_iterator = (
      plurality_checkable_iterator.PluralityCheckableIterator(
          bucket_iterator))

  task_executor.execute_tasks(
      plurality_checkable_bucket_iterator,
      parallelizable=True,
      task_status_queue=task_status_queue,
      progress_manager_args=task_status.ProgressManagerArgs(
          increment_type=task_status.IncrementType.INTEGER,
          manifest_path=None))
def Run(self, args):
  """Copies the source resources to the destination.

  Args:
    args: The parsed command-line argument namespace.

  Raises:
    ValueError: If mutually-exclusive flags are combined, or a storage class
      is given for a non-cloud destination.
  """
  if args.no_clobber and args.if_generation_match:
    raise ValueError(
        'Cannot specify both generation precondition and no-clobber.')
  encryption_util.initialize_key_store(args)

  source_expansion_iterator = name_expansion.NameExpansionIterator(
      args.source,
      all_versions=args.all_versions,
      recursion_requested=args.recursive,
      ignore_symlinks=args.ignore_symlinks)
  task_status_queue = task_graph_executor.multiprocessing_context.Queue()

  raw_destination_url = storage_url.storage_url_from_string(
      args.destination)
  if (isinstance(raw_destination_url, storage_url.FileUrl) and
      args.storage_class):
    raise ValueError(
        'Cannot specify storage class for a non-cloud destination: {}'.
        format(raw_destination_url))

  parallelizable = True
  shared_stream = None
  # Parallel execution cannot guarantee version ordering, so force
  # sequential mode when copying all versions with multiple workers.
  if (args.all_versions and
      (properties.VALUES.storage.process_count.GetInt() != 1 or
       properties.VALUES.storage.thread_count.GetInt() != 1)):
    log.warning(
        'Using sequential instead of parallel task execution. This will'
        ' maintain version ordering when copying all versions of an object.'
    )
    parallelizable = False
  # A pipe destination needs one sequential writer sharing a single stream.
  if (isinstance(raw_destination_url, storage_url.FileUrl) and
      raw_destination_url.is_pipe):
    log.warning('Downloading to a pipe.'
                ' This command may stall until the pipe is read.')
    parallelizable = False
    shared_stream = files.BinaryFileWriter(args.destination)

  user_request_args = (
      user_request_args_factory.get_user_request_args_from_command_args(
          args, metadata_type=user_request_args_factory.MetadataType.OBJECT))
  try:
    task_iterator = copy_task_iterator.CopyTaskIterator(
        source_expansion_iterator,
        args.destination,
        custom_md5_digest=args.content_md5,
        do_not_decompress=args.do_not_decompress,
        print_created_message=args.print_created_message,
        shared_stream=shared_stream,
        skip_unsupported=args.skip_unsupported,
        task_status_queue=task_status_queue,
        user_request_args=user_request_args,
    )
    self.exit_code = task_executor.execute_tasks(
        task_iterator,
        parallelizable=parallelizable,
        task_status_queue=task_status_queue,
        progress_manager_args=task_status.ProgressManagerArgs(
            task_status.IncrementType.FILES_AND_BYTES,
            manifest_path=user_request_args.manifest_path,
        ),
        continue_on_error=args.continue_on_error,
    )
  finally:
    # Close the pipe writer even if task execution raises; previously it
    # was only closed on the success path, leaking the open stream.
    if shared_stream:
      shared_stream.close()