def RunCommand(self): """Command entry point for the acl command.""" action_subcommand = self.args.pop(0) self.ParseSubOpts(check_args=True) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(sub_opts=self.sub_opts) self.def_acl = False if action_subcommand == 'get': metrics.LogCommandParams(subcommands=[action_subcommand]) self.GetAndPrintAcl(self.args[0]) elif action_subcommand == 'set': metrics.LogCommandParams(subcommands=[action_subcommand]) self._SetAcl() elif action_subcommand in ('ch', 'change'): metrics.LogCommandParams(subcommands=[action_subcommand]) self._ChAcl() else: raise CommandException( ('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help acl".') % (action_subcommand, self.command_name)) return 0
def RunCommand(self): """Command entry point for the logging command.""" # Parse the subcommand and alias for the new logging command. action_subcommand = self.args.pop(0) if action_subcommand == 'get': func = self._Get metrics.LogCommandParams(subcommands=[action_subcommand]) elif action_subcommand == 'set': state_subcommand = self.args.pop(0) if not self.args: self.RaiseWrongNumberOfArgumentsException() if state_subcommand == 'on': func = self._Enable metrics.LogCommandParams( subcommands=[action_subcommand, state_subcommand]) elif state_subcommand == 'off': func = self._Disable metrics.LogCommandParams( subcommands=[action_subcommand, state_subcommand]) else: raise CommandException( ('Invalid subcommand "%s" for the "%s %s" command.\n' 'See "gsutil help logging".') % (state_subcommand, self.command_name, action_subcommand)) else: raise CommandException( ('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help logging".') % (action_subcommand, self.command_name)) self.ParseSubOpts(check_args=True) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(sub_opts=self.sub_opts) func() return 0
def RunCommand(self): """Command entry point for the lifecycle command.""" subcommand = self.args.pop(0) if subcommand == 'get': metrics.LogCommandParams(subcommands=[subcommand]) return self._GetLifecycleConfig() elif subcommand == 'set': metrics.LogCommandParams(subcommands=[subcommand]) return self._SetLifecycleConfig() else: raise CommandException('Invalid subcommand "%s" for the %s command.' % (subcommand, self.command_name))
def RunCommand(self): """Command entry point for the pap command.""" action_subcommand = self.args[0] self.ParseSubOpts(check_args=True) if action_subcommand == 'get' or action_subcommand == 'set': metrics.LogCommandParams(sub_opts=self.sub_opts) metrics.LogCommandParams(subcommands=[action_subcommand]) self._Pap() else: raise CommandException( 'Invalid subcommand "%s", use get|set instead.' % action_subcommand)
def RunCommand(self): """Command entry point for the notification command.""" subcommand = self.args.pop(0) if subcommand == 'watchbucket': metrics.LogCommandParams(subcommands=[subcommand]) return self._RunSubCommand(self._WatchBucket) elif subcommand == 'stopchannel': metrics.LogCommandParams(subcommands=[subcommand]) return self._RunSubCommand(self._StopChannel) else: raise CommandException( 'Invalid subcommand "%s" for the %s command.' % (subcommand, self.command_name))
def testExceptionCatchingDecorator(self):
  """Tests the exception catching decorator CaptureAndLogException."""
  original_log_level = self.root_logger.getEffectiveLevel()
  self.root_logger.setLevel(logging.DEBUG)

  # Test that a wrapped function with an exception doesn't stop the process.
  mock_exc_fn = mock.MagicMock(__name__='mock_exc_fn',
                               side_effect=Exception())
  wrapped_fn = metrics.CaptureAndLogException(mock_exc_fn)
  wrapped_fn()
  self.assertEqual(1, mock_exc_fn.call_count)
  with open(self.log_handler_file) as f:
    log_output = f.read()
  self.assertIn(
      'Exception captured in mock_exc_fn during metrics collection',
      log_output)

  mock_err_fn = mock.MagicMock(__name__='mock_err_fn',
                               side_effect=TypeError())
  wrapped_fn = metrics.CaptureAndLogException(mock_err_fn)
  wrapped_fn()
  self.assertEqual(1, mock_err_fn.call_count)
  with open(self.log_handler_file) as f:
    log_output = f.read()
  self.assertIn(
      'Exception captured in mock_err_fn during metrics collection',
      log_output)

  # Test that exceptions in the unprotected metrics functions are caught.
  with mock.patch.object(MetricsCollector,
                         'GetCollector',
                         return_value='not a collector'):
    # These calls should all fail, but the exceptions shouldn't propagate up.
    metrics.Shutdown()
    metrics.LogCommandParams()
    metrics.LogRetryableError()
    metrics.LogFatalError()
    metrics.LogPerformanceSummary()
    metrics.CheckAndMaybePromptForAnalyticsEnabling('invalid argument')
    with open(self.log_handler_file) as f:
      log_output = f.read()
    self.assertIn(
        'Exception captured in Shutdown during metrics collection',
        log_output)
    self.assertIn(
        'Exception captured in LogCommandParams during metrics collection',
        log_output)
    self.assertIn(
        'Exception captured in LogRetryableError during metrics collection',
        log_output)
    self.assertIn(
        'Exception captured in LogFatalError during metrics collection',
        log_output)
    self.assertIn(
        'Exception captured in LogPerformanceSummary during metrics '
        'collection', log_output)
    self.assertIn(
        'Exception captured in CheckAndMaybePromptForAnalyticsEnabling '
        'during metrics collection', log_output)

  self.root_logger.setLevel(original_log_level)

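# A minimal sketch, not the actual gslib/metrics.py implementation, of an
# exception-swallowing decorator consistent with the expectations in the test
# above: the wrapped call never propagates an exception, and a failure is
# logged at debug level using the wrapped function's __name__. The name
# CaptureAndLogExceptionSketch is hypothetical.
import functools
import logging


def CaptureAndLogExceptionSketch(func):
  """Hypothetical stand-in for metrics.CaptureAndLogException."""

  @functools.wraps(func)
  def Wrapper(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except Exception:  # pylint: disable=broad-except
      # Swallow the error so metrics collection can never break a command.
      logging.getLogger().debug(
          'Exception captured in %s during metrics collection', func.__name__)

  return Wrapper
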
def testGAEventsCollection(self):
  """Tests the collection of each event category."""
  self.assertEqual([], self.collector._metrics)

  _LogAllTestMetrics()
  # Only the first command should be logged.
  metrics.LogCommandParams(command_name='cmd2')
  # Commands and errors should not be collected until we explicitly collect
  # them.
  self.assertEqual([], self.collector._metrics)
  self.collector._CollectCommandAndErrorMetrics()
  self.assertEqual(COMMAND_AND_ERROR_METRICS, self.collector._metrics)

  metrics.LogPerformanceSummary(True)
  perfsum1_metric = metrics._Metric(
      'https://example.com', 'POST',
      'ec={0}&ea=Upload&el={1}&ev=0{2}'.format(metrics._GA_PERFSUM_CATEGORY,
                                               VERSION,
                                               GLOBAL_DIMENSIONS_URL),
      'user-agent-007')
  self.assertEqual(COMMAND_AND_ERROR_METRICS + [perfsum1_metric],
                   self.collector._metrics)

  metrics.LogPerformanceSummary(False)
  perfsum2_metric = metrics._Metric(
      'https://example.com', 'POST',
      'ec={0}&ea=Download&el={1}&ev=0{2}'.format(metrics._GA_PERFSUM_CATEGORY,
                                                 VERSION,
                                                 GLOBAL_DIMENSIONS_URL),
      'user-agent-007')
  self.assertEqual(
      COMMAND_AND_ERROR_METRICS + [perfsum1_metric, perfsum2_metric],
      self.collector._metrics)

def testGAEventsCollection(self):
  """Tests the collection of each event category."""
  self.assertEqual([], self.collector._metrics)

  _LogAllTestMetrics()
  # Only the first command should be logged.
  metrics.LogCommandParams(command_name='cmd2')
  # Commands and errors should not be collected until we explicitly collect
  # them.
  self.assertEqual([], self.collector._metrics)
  self.collector._CollectCommandAndErrorMetrics()
  self.assertEqual(COMMAND_AND_ERROR_TEST_METRICS,
                   MetricListToTestMetricSet(self.collector._metrics))

  metrics.LogPerformanceSummary(True)
  perfsum1_metric = TestMetric(
      'https://example.com', 'POST',
      frozenset(GLOBAL_PARAMETERS +
                ['ec=' + metrics._GA_PERFSUM_CATEGORY, 'ea=Upload']),
      'user-agent-007')
  COMMAND_AND_ERROR_TEST_METRICS.add(perfsum1_metric)
  self.assertEqual(COMMAND_AND_ERROR_TEST_METRICS,
                   MetricListToTestMetricSet(self.collector._metrics))

  metrics.LogPerformanceSummary(False)
  perfsum2_metric = TestMetric(
      'https://example.com', 'POST',
      frozenset(GLOBAL_PARAMETERS +
                ['ec=' + metrics._GA_PERFSUM_CATEGORY, 'ea=Download']),
      'user-agent-007')
  COMMAND_AND_ERROR_TEST_METRICS.add(perfsum2_metric)
  self.assertEqual(COMMAND_AND_ERROR_TEST_METRICS,
                   MetricListToTestMetricSet(self.collector._metrics))

def RunCommand(self): """Command entry point for the kms command.""" # If the only credential type the user supplies in their boto file is hmac, # GetApiSelector logic will force us to use the XML API. As the XML API does # not support all the operations needed for kms subcommands, fail early. if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON: raise CommandException('\n'.join( textwrap.wrap( 'The "%s" command can only be used with the GCS JSON API, which ' 'cannot use HMAC credentials. Please supply a credential ' 'type that is compatible with the JSON API (e.g. OAuth2) in your ' 'boto config file.' % self.command_name))) method_for_subcommand = { 'authorize': KmsCommand._Authorize, 'encryption': KmsCommand._Encryption, 'serviceaccount': KmsCommand._ServiceAccount } self.subcommand_name = self.args.pop(0) if self.subcommand_name in method_for_subcommand: metrics.LogCommandParams(subcommands=[self.subcommand_name]) return self._RunSubCommand(method_for_subcommand[self.subcommand_name]) else: raise CommandException('Invalid subcommand "%s" for the %s command.' % (self.subcommand_name, self.command_name))
def CollectMetricAndSetLogLevel(log_level, log_file_path):
  metrics.LogCommandParams(command_name='cmd1',
                           subcommands=['action1'],
                           sub_opts=[('optb', ''), ('opta', '')])
  metrics.LogFatalError(gslib.exception.CommandException('test'))

  # Wait for report to make sure the log is written before we check it.
  self.collector.ReportMetrics(wait_for_report=True,
                               log_level=log_level,
                               log_file_path=log_file_path)
  self.assertEqual([], self.collector._metrics)

def RunCommand(self): """Command entry point for the bucketpolicyonly command.""" if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON: raise CommandException('\n'.join( textwrap.wrap( 'The "%s" command can only be used with the Cloud Storage JSON API.' % self.command_name))) action_subcommand = self.args[0] self.ParseSubOpts(check_args=True) if action_subcommand == 'get' or action_subcommand == 'set': metrics.LogCommandParams(sub_opts=self.sub_opts) metrics.LogCommandParams(subcommands=[action_subcommand]) self._BucketPolicyOnly() else: raise CommandException('Invalid subcommand "%s", use get|set instead.' % action_subcommand)
def RunCommand(self): """Command entry point for the notification command.""" self.subcommand_name = self.args.pop(0) if self.subcommand_name in NotificationCommand.SUBCOMMANDS: metrics.LogCommandParams(subcommands=[self.subcommand_name]) return self._RunSubCommand(NotificationCommand.SUBCOMMANDS[ self.subcommand_name]) else: raise CommandException('Invalid subcommand "%s" for the %s command.' % (self.subcommand_name, self.command_name))
def RunCommand(self): """Command entry point for the versioning command.""" action_subcommand = self.args.pop(0) if action_subcommand == 'get': func = self._GetVersioning metrics.LogCommandParams(subcommands=[action_subcommand]) elif action_subcommand == 'set': func = self._SetVersioning versioning_arg = self.args[0].lower() if versioning_arg in ('on', 'off'): metrics.LogCommandParams( subcommands=[action_subcommand, versioning_arg]) else: raise CommandException(( 'Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help %s".') % ( action_subcommand, self.command_name, self.command_name)) func() return 0
def _RunSubCommand(self, func):
  try:
    self.sub_opts, self.args = getopt.getopt(
        self.args, self.command_spec.supported_sub_args)
    # Commands with both suboptions and subcommands need to reparse for
    # suboptions, so we log again.
    metrics.LogCommandParams(sub_opts=self.sub_opts)
    return func(self)
  except getopt.GetoptError:
    self.RaiseInvalidArgumentException()

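# Hedged illustration of the getopt.getopt() split performed by _RunSubCommand
# above. The option string 'p:f' and the argument values are hypothetical;
# each command's real supported_sub_args differs. getopt returns the parsed
# (flag, value) pairs first and the remaining positional args second, which is
# what gets assigned to self.sub_opts and self.args respectively.
import getopt

sub_opts, remaining_args = getopt.getopt(
    ['-p', 'my-project', '-f', 'gs://bucket'], 'p:f')
# sub_opts == [('-p', 'my-project'), ('-f', '')]
# remaining_args == ['gs://bucket']
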
def testMetricsPosting(self):
  """Tests the metrics posting process as performed in metrics_reporter.py."""
  # Windows has odd restrictions about attempting to open a named tempfile
  # while it's open. Regardless of platform, we don't need the file to be
  # open or even exist; we only need a valid file path to create a log file
  # at.
  metrics_file = tempfile.NamedTemporaryFile()
  metrics_file_name = metrics_file.name
  metrics_file.close()

  # Logging statements will create a file at the path we just fetched. Make
  # sure we clean up the file afterward.
  def MetricsTempFileCleanup(file_path):
    try:
      os.unlink(file_path)
    except OSError:
      # Don't fail if the file was already cleaned up.
      pass

  self.addCleanup(MetricsTempFileCleanup, metrics_file_name)

  # Collect a metric and set log level for the metrics_reporter subprocess.
  def CollectMetricAndSetLogLevel(log_level, log_file_path):
    metrics.LogCommandParams(command_name='cmd1',
                             subcommands=['action1'],
                             sub_opts=[('optb', ''), ('opta', '')])
    metrics.LogFatalError(gslib.exception.CommandException('test'))

    # Wait for report to make sure the log is written before we check it.
    self.collector.ReportMetrics(wait_for_report=True,
                                 log_level=log_level,
                                 log_file_path=log_file_path)
    self.assertEqual([], self.collector._metrics)

  metrics.LogCommandParams(global_opts=[('-y', 'value'), ('-z', ''),
                                        ('-x', '')])

  # The log file should be empty unless the debug option is specified.
  CollectMetricAndSetLogLevel(logging.DEBUG, metrics_file.name)
  with open(metrics_file.name, 'rb') as metrics_log:
    log_text = metrics_log.read()
  expected_response = (
      'Metric(endpoint=\'https://example.com\', method=\'POST\', '
      'body=\'{0}&cm2=0&ea=cmd1+action1&ec={1}&el={2}&ev=0\', '
      'user_agent=\'user-agent-007\')'.format(GLOBAL_DIMENSIONS_URL_PARAMS,
                                              metrics._GA_COMMANDS_CATEGORY,
                                              VERSION))
  self.assertIn(expected_response, log_text)
  self.assertIn('RESPONSE: 200', log_text)

  CollectMetricAndSetLogLevel(logging.INFO, metrics_file.name)
  with open(metrics_file.name, 'rb') as metrics_log:
    log_text = metrics_log.read()
  self.assertEqual(log_text, '')

  CollectMetricAndSetLogLevel(logging.WARN, metrics_file.name)
  with open(metrics_file.name, 'rb') as metrics_log:
    log_text = metrics_log.read()
  self.assertEqual(log_text, '')

def _LogAllTestMetrics(): """Logs all the common metrics for a test.""" metrics.LogCommandParams(command_name='cmd1', subcommands=['action1'], global_opts=[('-y', 'value'), ('-z', ''), ('-x', '')], sub_opts=[('optb', ''), ('opta', '')]) metrics.LogRetryableError('retryable_error_type_1') metrics.LogRetryableError('retryable_error_type_1') metrics.LogRetryableError('retryable_error_type_2') metrics.LogFatalError(gslib.exception.CommandException('test'))
def _LogAllTestMetrics(): """Logs all the common metrics for a test.""" metrics.LogCommandParams( command_name='cmd1', subcommands=['action1'], global_opts=[('-y', 'value'), ('-z', ''), ('-x', '')], sub_opts=[('optb', ''), ('opta', '')]) retry_msg_1 = RetryableErrorMessage(Exception(), 0) retry_msg_2 = RetryableErrorMessage(ValueError(), 0) metrics.LogRetryableError(retry_msg_1) metrics.LogRetryableError(retry_msg_1) metrics.LogRetryableError(retry_msg_2) metrics.LogFatalError(gslib.exception.CommandException('test'))
def RunCommand(self): """Command entry point for the label command.""" action_subcommand = self.args.pop(0) self.ParseSubOpts(check_args=True) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(sub_opts=self.sub_opts) if action_subcommand == 'get': metrics.LogCommandParams(subcommands=[action_subcommand]) self._GetAndPrintLabel(self.args[0]) elif action_subcommand == 'set': metrics.LogCommandParams(subcommands=[action_subcommand]) self._SetLabel() elif action_subcommand == 'ch': metrics.LogCommandParams(subcommands=[action_subcommand]) self._ChLabel() else: raise CommandException( 'Invalid subcommand "%s" for the %s command.\nSee "gsutil help %s".' % (action_subcommand, self.command_name, self.command_name)) return 0
def RunCommand(self): """Command entry point for the cors command.""" action_subcommand = self.args.pop(0) if action_subcommand == 'get': func = self._GetCors elif action_subcommand == 'set': func = self._SetCors else: raise CommandException( ('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help cors".') % (action_subcommand, self.command_name)) metrics.LogCommandParams(subcommands=[action_subcommand]) return func()
def testCommandAndErrorEventsCollection(self):
  """Tests the collection of command and error GA events."""
  self.assertEqual([], self.collector._metrics)

  _LogAllTestMetrics()
  # Only the first command should be logged.
  metrics.LogCommandParams(command_name='cmd2')
  # Commands and errors should not be collected until we explicitly collect
  # them.
  self.assertEqual([], self.collector._metrics)
  self.collector._CollectCommandAndErrorMetrics()
  self.assertEqual(COMMAND_AND_ERROR_TEST_METRICS,
                   set(self.collector._metrics))

def testExceptionCatchingDecorator(self):
  """Tests the exception catching decorator CaptureAndLogException."""
  # A wrapped function with an exception should not stop the process.
  mock_exc_fn = mock.MagicMock(__name__='mock_exc_fn',
                               side_effect=Exception())
  wrapped_fn = metrics.CaptureAndLogException(mock_exc_fn)
  wrapped_fn()
  debug_messages = self.log_handler.messages['debug']
  self.assertIn('Exception captured in mock_exc_fn during metrics collection',
                debug_messages[0])
  self.log_handler.reset()
  self.assertEqual(1, mock_exc_fn.call_count)

  mock_err_fn = mock.MagicMock(__name__='mock_err_fn',
                               side_effect=TypeError())
  wrapped_fn = metrics.CaptureAndLogException(mock_err_fn)
  wrapped_fn()
  self.assertEqual(1, mock_err_fn.call_count)
  debug_messages = self.log_handler.messages['debug']
  self.assertIn('Exception captured in mock_err_fn during metrics collection',
                debug_messages[0])
  self.log_handler.reset()

  # Test that exceptions in the unprotected metrics functions are caught.
  with mock.patch.object(MetricsCollector,
                         'GetCollector',
                         return_value='not a collector'):
    # These calls should all fail, but the exceptions shouldn't propagate up.
    metrics.Shutdown()
    metrics.LogCommandParams()
    metrics.LogRetryableError()
    metrics.LogFatalError()
    metrics.LogPerformanceSummaryParams()
    metrics.CheckAndMaybePromptForAnalyticsEnabling('invalid argument')
    debug_messages = self.log_handler.messages['debug']
    message_index = 0
    for func_name in ('Shutdown', 'LogCommandParams', 'LogRetryableError',
                      'LogFatalError', 'LogPerformanceSummaryParams',
                      'CheckAndMaybePromptForAnalyticsEnabling'):
      self.assertIn(
          'Exception captured in %s during metrics collection' % func_name,
          debug_messages[message_index])
      message_index += 1
    self.log_handler.reset()

def RunCommand(self): """Command entry point for the web command.""" action_subcommand = self.args.pop(0) self.ParseSubOpts(check_args=True) if action_subcommand == 'get': func = self._GetWeb elif action_subcommand == 'set': func = self._SetWeb else: raise CommandException(('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help web".') % (action_subcommand, self.command_name)) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(subcommands=[action_subcommand], sub_opts=self.sub_opts) return func()
def RunCommand(self): """Command entry point for the defstorageclass command.""" action_subcommand = self.args.pop(0) subcommand_args = [action_subcommand] if action_subcommand == 'get': func = self._GetDefStorageClass elif action_subcommand == 'set': func = self._SetDefStorageClass normalized_storage_class = NormalizeStorageClass(self.args[0]) subcommand_args.append(normalized_storage_class) else: raise CommandException( ('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help %s".') % (action_subcommand, self.command_name, self.command_name)) metrics.LogCommandParams(subcommands=subcommand_args) func() return 0
def RunCommand(self): """Command entry point for the retention command.""" # If the only credential type the user supplies in their boto file is HMAC, # GetApiSelector logic will force us to use the XML API, which bucket lock # does not support at the moment. if self.gsutil_api.GetApiSelector('gs') != ApiSelector.JSON: raise CommandException( ('The {} command can only be used with the GCS ' 'JSON API. If you have only supplied hmac ' 'credentials in your boto file, please instead ' 'supply a credential type that can be used with ' 'the JSON API.').format(self.command_name)) self.preconditions = PreconditionsFromHeaders(self.headers) action_subcommand = self.args.pop(0) self.ParseSubOpts(check_args=True) if action_subcommand == 'set': func = self._SetRetention elif action_subcommand == 'clear': func = self._ClearRetention elif action_subcommand == 'get': func = self._GetRetention elif action_subcommand == 'lock': func = self._LockRetention elif action_subcommand == 'event-default': func = self._DefaultEventHold elif action_subcommand == 'event': func = self._EventHold elif action_subcommand == 'temp': func = self._TempHold else: raise CommandException( ('Invalid subcommand "{}" for the {} command.\n' 'See "gsutil help retention".').format( action_subcommand, self.command_name)) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(subcommands=[action_subcommand], sub_opts=self.sub_opts) return func()
def testMetricsPosting(self):
  """Tests the metrics posting process as performed in metrics_reporter.py."""
  # Clear the log file.
  open(LOG_FILE_PATH, 'w').close()
  metrics.LogCommandParams(global_opts=[('-y', 'value'), ('-z', ''),
                                        ('-x', '')])

  # Collect a metric and set log level for the metrics_reporter subprocess.
  def CollectMetricAndSetLogLevel(log_level):
    metrics.LogCommandParams(command_name='cmd1',
                             subcommands=['action1'],
                             sub_opts=[('optb', ''), ('opta', '')])
    metrics.LogFatalError(gslib.exception.CommandException('test'))

    # Wait for report to make sure the log is written before we check it.
    self.collector.ReportMetrics(wait_for_report=True, log_level=log_level)
    self.assertEqual([], self.collector._metrics)

  # The log file should be empty unless the debug option is specified.
  CollectMetricAndSetLogLevel(logging.DEBUG)
  with open(LOG_FILE_PATH, 'rb') as metrics_log:
    log_text = metrics_log.read()
  expected_request = (
      '_Metric(endpoint=\'https://example.com\', method=\'POST\', '
      'body=\'ec={0}&ea=cmd1+action1&el={1}&ev=0&cm2=0{2}\', '
      'user_agent=\'user-agent-007\')').format(metrics._GA_COMMANDS_CATEGORY,
                                               VERSION, GLOBAL_DIMENSIONS_URL)
  self.assertIn(expected_request, log_text)
  self.assertIn('RESPONSE: 200', log_text)

  CollectMetricAndSetLogLevel(logging.INFO)
  with open(LOG_FILE_PATH, 'rb') as metrics_log:
    log_text = metrics_log.read()
  self.assertEqual(log_text, '')

  CollectMetricAndSetLogLevel(logging.WARN)
  with open(LOG_FILE_PATH, 'rb') as metrics_log:
    log_text = metrics_log.read()
  self.assertEqual(log_text, '')

def RunCommand(self): """Command entry point for the defacl command.""" action_subcommand = self.args.pop(0) self.ParseSubOpts(check_args=True) self.def_acl = True self.continue_on_error = False if action_subcommand == 'get': func = self._GetDefAcl elif action_subcommand == 'set': func = self._SetDefAcl elif action_subcommand in ('ch', 'change'): func = self._ChDefAcl else: raise CommandException(('Invalid subcommand "%s" for the %s command.\n' 'See "gsutil help defacl".') % (action_subcommand, self.command_name)) # Commands with both suboptions and subcommands need to reparse for # suboptions, so we log again. metrics.LogCommandParams(subcommands=[action_subcommand], sub_opts=self.sub_opts) func() return 0
def RunNamedCommand(self,
                    command_name,
                    args=None,
                    headers=None,
                    debug=0,
                    trace_token=None,
                    parallel_operations=False,
                    skip_update_check=False,
                    logging_filters=None,
                    do_shutdown=True,
                    perf_trace_token=None,
                    user_project=None,
                    collect_analytics=False):
  """Runs the named command.

  Used by gsutil main, commands built atop other commands, and tests.

  Args:
    command_name: The name of the command being run.
    args: Command-line args (arg0 = actual arg, not command name ala bash).
    headers: Dictionary containing optional HTTP headers to pass to boto.
    debug: Debug level to pass in to boto connection (range 0..3).
    trace_token: Trace token to pass to the underlying API.
    parallel_operations: Should command operations be executed in parallel?
    skip_update_check: Set to True to disable checking for gsutil updates.
    logging_filters: Optional list of logging.Filters to apply to this
        command's logger.
    do_shutdown: Stop all parallelism framework workers iff this is True.
    perf_trace_token: Performance measurement trace token to pass to the
        underlying API.
    user_project: The project to bill this request to.
    collect_analytics: Set to True to collect an analytics metric logging
        this command.

  Raises:
    CommandException: if errors encountered.

  Returns:
    Return value(s) from Command that was run.
  """
  command_changed_to_update = False
  if (not skip_update_check and
      self.MaybeCheckForAndOfferSoftwareUpdate(command_name, debug)):
    command_name = 'update'
    command_changed_to_update = True
    args = ['-n']

  # Check for opt-in analytics.
  if system_util.IsRunningInteractively() and collect_analytics:
    metrics.CheckAndMaybePromptForAnalyticsEnabling()

  if not args:
    args = []

  # Include api_version header in all commands.
  api_version = boto.config.get_value('GSUtil', 'default_api_version', '1')
  if not headers:
    headers = {}
  headers['x-goog-api-version'] = api_version

  if command_name not in self.command_map:
    close_matches = difflib.get_close_matches(command_name,
                                              self.command_map.keys(),
                                              n=1)
    if close_matches:
      # Instead of suggesting a deprecated command alias, suggest the new
      # name for that command.
      translated_command_name = (
          OLD_ALIAS_MAP.get(close_matches[0], close_matches)[0])
      print >> sys.stderr, 'Did you mean this?'
      print >> sys.stderr, '\t%s' % translated_command_name
    elif command_name == 'update' and gslib.IS_PACKAGE_INSTALL:
      sys.stderr.write(
          'Update command is not supported for package installs; '
          'please instead update using your package manager.')

    raise CommandException('Invalid command "%s".' % command_name)

  if '--help' in args:
    new_args = [command_name]
    original_command_class = self.command_map[command_name]
    subcommands = original_command_class.help_spec.subcommand_help_text.keys()
    for arg in args:
      if arg in subcommands:
        new_args.append(arg)
        break  # Take the first match and throw away the rest.
    args = new_args
    command_name = 'help'

  HandleArgCoding(args)
  HandleHeaderCoding(headers)

  command_class = self.command_map[command_name]
  command_inst = command_class(self,
                               args,
                               headers,
                               debug,
                               trace_token,
                               parallel_operations,
                               self.bucket_storage_uri_class,
                               self.gsutil_api_class_map_factory,
                               logging_filters,
                               command_alias_used=command_name,
                               perf_trace_token=perf_trace_token,
                               user_project=user_project)

  # Log the command name, command alias, and sub-options after being parsed
  # by RunCommand and the command constructor. For commands with subcommands
  # and suboptions, we need to log the suboptions again within the command
  # itself because the command constructor will not parse the suboptions
  # fully.
  if collect_analytics:
    metrics.LogCommandParams(command_name=command_inst.command_name,
                             sub_opts=command_inst.sub_opts,
                             command_alias=command_name)

  return_code = command_inst.RunCommand()

  if CheckMultiprocessingAvailableAndInit().is_available and do_shutdown:
    ShutDownGsutil()
  if GetFailureCount() > 0:
    return_code = 1

  if command_changed_to_update:
    # If the command changed to update, the user's original command was
    # not executed.
    return_code = 1
    print '\n'.join(
        textwrap.wrap(
            'Update was successful. Exiting with code 1 as the original command '
            'issued prior to the update was not executed and should be re-run.'))

  return return_code

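# Hedged illustration of the difflib.get_close_matches() call that powers the
# "Did you mean this?" suggestion in RunNamedCommand above. The command list
# here is a small hypothetical subset of self.command_map.keys().
import difflib

close_matches = difflib.get_close_matches('lss', ['ls', 'cp', 'rm', 'acl'],
                                          n=1)
# close_matches == ['ls']
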
def main():
  InitializeSignalHandling()
  # Any modules used in initializing multiprocessing variables must be
  # imported after importing gslib.__main__.
  # pylint: disable=redefined-outer-name,g-import-not-at-top
  import gslib.boto_translation
  import gslib.command
  import gslib.util
  from gslib.util import BOTO_IS_SECURE
  from gslib.util import CERTIFICATE_VALIDATION_ENABLED
  # pylint: disable=unused-variable
  from gcs_oauth2_boto_plugin import oauth2_client
  from apitools.base.py import credentials_lib
  # pylint: enable=unused-variable
  from gslib.util import CheckMultiprocessingAvailableAndInit
  if CheckMultiprocessingAvailableAndInit().is_available:
    # These setup methods must be called, and, on Windows, they can only be
    # called from within an "if __name__ == '__main__':" block.
    gslib.command.InitializeMultiprocessingVariables()
    gslib.boto_translation.InitializeMultiprocessingVariables()
  else:
    gslib.command.InitializeThreadingVariables()

  # This needs to be done after gslib.util.InitializeMultiprocessingVariables(),
  # since otherwise we can't call gslib.util.CreateLock.
  try:
    # pylint: disable=unused-import,g-import-not-at-top
    import gcs_oauth2_boto_plugin
    gsutil_client_id, gsutil_client_secret = GetGsutilClientIdAndSecret()
    gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
        gsutil_client_id, gsutil_client_secret)
    gcs_oauth2_boto_plugin.oauth2_helper.SetLock(CreateLock())
    credentials_lib.SetCredentialsCacheFileLock(CreateLock())
  except ImportError:
    pass

  global debug
  global test_exception_traces

  if not (2, 7) <= sys.version_info[:3] < (3,):
    raise CommandException('gsutil requires python 2.7.')

  # In gsutil 4.0 and beyond, we don't use the boto library for the JSON
  # API. However, we still store gsutil configuration data in the .boto
  # config file for compatibility with previous versions and user convenience.
  # Many users have a .boto configuration file from previous versions, and it
  # is useful to have all of the configuration for gsutil stored in one place.
  command_runner = CommandRunner()
  if not BOTO_IS_SECURE:
    raise CommandException('\n'.join(textwrap.wrap(
        'Your boto configuration has is_secure = False. Gsutil cannot be '
        'run this way, for security reasons.')))

  headers = {}
  parallel_operations = False
  quiet = False
  version = False
  debug = 0
  trace_token = None
  perf_trace_token = None
  test_exception_traces = False

  # If user enters no commands just print the usage info.
  if len(sys.argv) == 1:
    sys.argv.append('help')

  # Change the default of the 'https_validate_certificates' boto option to
  # True (it is currently False in boto).
  if not boto.config.has_option('Boto', 'https_validate_certificates'):
    if not boto.config.has_section('Boto'):
      boto.config.add_section('Boto')
    boto.config.setbool('Boto', 'https_validate_certificates', True)

  gslib.util.configured_certs_file = gslib.util.ConfigureCertsFile()
  for signal_num in GetCaughtSignals():
    RegisterSignalHandler(signal_num, _CleanupSignalHandler)
  GetCertsFile()

  try:
    try:
      opts, args = getopt.getopt(sys.argv[1:], 'dDvo:h:mq', [
          'debug', 'detailedDebug', 'version', 'option', 'help', 'header',
          'multithreaded', 'quiet', 'testexceptiontraces', 'trace-token=',
          'perf-trace-token='
      ])
    except getopt.GetoptError as e:
      _HandleCommandException(CommandException(e.msg))
    for o, a in opts:
      if o in ('-d', '--debug'):
        # Also causes boto to include httplib header output.
        debug = DEBUGLEVEL_DUMP_REQUESTS
      elif o in ('-D', '--detailedDebug'):
        # We use debug level 3 to ask gsutil code to output more detailed
        # debug output. This is a bit of a hack since it overloads the same
        # flag that was originally implemented for boto use. And we use -DD
        # to ask for really detailed debugging (i.e., including HTTP payload).
        if debug == DEBUGLEVEL_DUMP_REQUESTS:
          debug = DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
        else:
          debug = DEBUGLEVEL_DUMP_REQUESTS
      elif o in ('-?', '--help'):
        _OutputUsageAndExit(command_runner)
      elif o in ('-h', '--header'):
        (hdr_name, _, hdr_val) = a.partition(':')
        if not hdr_name:
          _OutputUsageAndExit(command_runner)
        headers[hdr_name.lower()] = hdr_val
      elif o in ('-m', '--multithreaded'):
        parallel_operations = True
      elif o in ('-q', '--quiet'):
        quiet = True
      elif o in ('-v', '--version'):
        version = True
      elif o == '--perf-trace-token':
        perf_trace_token = a
      elif o == '--trace-token':
        trace_token = a
      elif o == '--testexceptiontraces':  # Hidden flag for integration tests.
        test_exception_traces = True
        # Avoid printing extra warnings to stderr regarding long retries by
        # setting the threshold very high.
        gslib.util.LONG_RETRY_WARN_SEC = 3600
      elif o in ('-o', '--option'):
        (opt_section_name, _, opt_value) = a.partition('=')
        if not opt_section_name:
          _OutputUsageAndExit(command_runner)
        (opt_section, _, opt_name) = opt_section_name.partition(':')
        if not opt_section or not opt_name:
          _OutputUsageAndExit(command_runner)
        if not boto.config.has_section(opt_section):
          boto.config.add_section(opt_section)
        boto.config.set(opt_section, opt_name, opt_value)
    metrics.LogCommandParams(global_opts=opts)
    httplib2.debuglevel = debug
    if trace_token:
      sys.stderr.write(TRACE_WARNING)
    if debug >= DEBUGLEVEL_DUMP_REQUESTS:
      sys.stderr.write(DEBUG_WARNING)
      _ConfigureLogging(level=logging.DEBUG)
      command_runner.RunNamedCommand('ver', ['-l'])
      config_items = []
      try:
        config_items.extend(boto.config.items('Boto'))
        config_items.extend(boto.config.items('GSUtil'))
      except ConfigParser.NoSectionError:
        pass
      for i in xrange(len(config_items)):
        config_item_key = config_items[i][0]
        if config_item_key in CONFIG_KEYS_TO_REDACT:
          config_items[i] = (config_item_key, 'REDACTED')
      sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
      sys.stderr.write('config_file_list: %s\n' % GetBotoConfigFileList())
      sys.stderr.write('config: %s\n' % str(config_items))
    elif quiet:
      _ConfigureLogging(level=logging.WARNING)
    else:
      _ConfigureLogging(level=logging.INFO)
      # oauth2client uses info logging in places that would better
      # correspond to gsutil's debug logging (e.g., when refreshing
      # access tokens).
      oauth2client.client.logger.setLevel(logging.WARNING)

    if not CERTIFICATE_VALIDATION_ENABLED:
      sys.stderr.write(HTTP_WARNING)

    if version:
      command_name = 'version'
    elif not args:
      command_name = 'help'
    else:
      command_name = args[0]

    _CheckAndWarnForProxyDifferences()

    if os.environ.get('_ARGCOMPLETE', '0') == '1':
      return _PerformTabCompletion(command_runner)

    return _RunNamedCommandAndHandleExceptions(
        command_runner,
        command_name,
        args=args[1:],
        headers=headers,
        debug_level=debug,
        trace_token=trace_token,
        parallel_operations=parallel_operations,
        perf_trace_token=perf_trace_token)
  finally:
    _Cleanup()

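# Hedged illustration of the '-o <section>:<name>=<value>' parsing used by the
# '-o'/'--option' branch in main() above (the newer main() that follows uses
# the same logic); it relies only on str.partition. The section/option values
# here are example inputs, not output from a real run.
boto_override = 'GSUtil:parallel_thread_count=10'
opt_section_name, _, opt_value = boto_override.partition('=')
# opt_section_name == 'GSUtil:parallel_thread_count', opt_value == '10'
opt_section, _, opt_name = opt_section_name.partition(':')
# opt_section == 'GSUtil', opt_name == 'parallel_thread_count'
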
def main():
  InitializeSignalHandling()
  # Any modules used in initializing multiprocessing variables must be
  # imported after importing gslib.__main__.
  # pylint: disable=redefined-outer-name,g-import-not-at-top
  import gslib.boto_translation
  import gslib.command
  import gslib.utils.parallelism_framework_util
  # pylint: disable=unused-variable
  from gcs_oauth2_boto_plugin import oauth2_client
  from apitools.base.py import credentials_lib
  # pylint: enable=unused-variable
  if (gslib.utils.parallelism_framework_util.
      CheckMultiprocessingAvailableAndInit().is_available):
    # These setup methods must be called, and, on Windows, they can only be
    # called from within an "if __name__ == '__main__':" block.
    gslib.command.InitializeMultiprocessingVariables()
    gslib.boto_translation.InitializeMultiprocessingVariables()
  else:
    gslib.command.InitializeThreadingVariables()

  # This needs to be done after InitializeMultiprocessingVariables(), since
  # otherwise we can't call CreateLock.
  try:
    # pylint: disable=unused-import,g-import-not-at-top
    import gcs_oauth2_boto_plugin
    gsutil_client_id, gsutil_client_secret = (
        system_util.GetGsutilClientIdAndSecret())
    gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
        gsutil_client_id, gsutil_client_secret)
    gcs_oauth2_boto_plugin.oauth2_helper.SetLock(
        gslib.utils.parallelism_framework_util.CreateLock())
    credentials_lib.SetCredentialsCacheFileLock(
        gslib.utils.parallelism_framework_util.CreateLock())
  except ImportError:
    pass

  global debug_level
  global test_exception_traces

  supported, err = check_python_version_support()
  if not supported:
    raise CommandException(err)
    sys.exit(1)

  boto_util.MonkeyPatchBoto()
  system_util.MonkeyPatchHttp()

  # In gsutil 4.0 and beyond, we don't use the boto library for the JSON
  # API. However, we still store gsutil configuration data in the .boto
  # config file for compatibility with previous versions and user convenience.
  # Many users have a .boto configuration file from previous versions, and it
  # is useful to have all of the configuration for gsutil stored in one place.
  command_runner = CommandRunner()
  if not boto_util.BOTO_IS_SECURE:
    raise CommandException('\n'.join(
        textwrap.wrap(
            'Your boto configuration has is_secure = False. Gsutil cannot be '
            'run this way, for security reasons.')))

  headers = {}
  parallel_operations = False
  quiet = False
  version = False
  debug_level = 0
  trace_token = None
  perf_trace_token = None
  test_exception_traces = False
  user_project = None

  # If user enters no commands just print the usage info.
  if len(sys.argv) == 1:
    sys.argv.append('help')

  # Change the default of the 'https_validate_certificates' boto option to
  # True (it is currently False in boto).
  if not boto.config.has_option('Boto', 'https_validate_certificates'):
    if not boto.config.has_section('Boto'):
      boto.config.add_section('Boto')
    boto.config.setbool('Boto', 'https_validate_certificates', True)

  for signal_num in GetCaughtSignals():
    RegisterSignalHandler(signal_num, _CleanupSignalHandler)

  try:
    for o, a in opts:
      if o in ('-d', '--debug'):
        # Also causes boto to include httplib header output.
        debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
      elif o in ('-D', '--detailedDebug'):
        # We use debug level 3 to ask gsutil code to output more detailed
        # debug output. This is a bit of a hack since it overloads the same
        # flag that was originally implemented for boto use. And we use -DD
        # to ask for really detailed debugging (i.e., including HTTP payload).
        if debug_level == constants.DEBUGLEVEL_DUMP_REQUESTS:
          debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
        else:
          debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
      elif o in ('-?', '--help'):
        _OutputUsageAndExit(command_runner)
      elif o in ('-h', '--header'):
        (hdr_name, _, hdr_val) = a.partition(':')
        if not hdr_name:
          _OutputUsageAndExit(command_runner)
        headers[hdr_name.lower()] = hdr_val
      elif o in ('-m', '--multithreaded'):
        parallel_operations = True
      elif o in ('-q', '--quiet'):
        quiet = True
      elif o == '-u':
        user_project = a
      elif o in ('-v', '--version'):
        version = True
      elif o in ('-i', '--impersonate-service-account'):
        constants.IMPERSONATE_SERVICE_ACCOUNT = a
      elif o == '--perf-trace-token':
        perf_trace_token = a
      elif o == '--trace-token':
        trace_token = a
      elif o == '--testexceptiontraces':  # Hidden flag for integration tests.
        test_exception_traces = True
        # Avoid printing extra warnings to stderr regarding long retries by
        # setting the threshold very high.
        constants.LONG_RETRY_WARN_SEC = 3600
      elif o in ('-o', '--option'):
        (opt_section_name, _, opt_value) = a.partition('=')
        if not opt_section_name:
          _OutputUsageAndExit(command_runner)
        (opt_section, _, opt_name) = opt_section_name.partition(':')
        if not opt_section or not opt_name:
          _OutputUsageAndExit(command_runner)
        if not boto.config.has_section(opt_section):
          boto.config.add_section(opt_section)
        boto.config.set(opt_section, opt_name, opt_value)

    # Now that any Boto option overrides (via `-o` args) have been parsed,
    # perform initialization that depends on those options.
    boto_util.configured_certs_file = boto_util.ConfigureCertsFile()

    metrics.LogCommandParams(global_opts=opts)
    httplib2.debuglevel = debug_level
    if trace_token:
      sys.stderr.write(TRACE_WARNING)

    if debug_level >= constants.DEBUGLEVEL_DUMP_REQUESTS:
      sys.stderr.write(DEBUG_WARNING)
      _ConfigureRootLogger(level=logging.DEBUG)
      command_runner.RunNamedCommand('ver', ['-l'])

      config_items = []
      for config_section in ('Boto', 'GSUtil'):
        try:
          config_items.extend(boto.config.items(config_section))
        except configparser.NoSectionError:
          pass
      for i in range(len(config_items)):
        config_item_key = config_items[i][0]
        if config_item_key in CONFIG_KEYS_TO_REDACT:
          config_items[i] = (config_item_key, 'REDACTED')

      sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
      sys.stderr.write('config_file_list: %s\n' %
                       boto_util.GetFriendlyConfigFilePaths())
      sys.stderr.write('config: %s\n' % str(config_items))
    else:
      # Non-debug log level.
      root_logger_level = logging.WARNING if quiet else logging.INFO
      # oauth2client uses INFO and WARNING logging in places that would better
      # correspond to gsutil's debug logging (e.g., when refreshing
      # access tokens), so we bump the threshold one level higher where
      # appropriate. These log levels work for regular- and quiet-level
      # logging.
      oa2c_logger_level = logging.WARNING
      oa2c_multiprocess_file_storage_logger_level = logging.ERROR

      _ConfigureRootLogger(level=root_logger_level)
      oauth2client.client.logger.setLevel(oa2c_logger_level)
      oauth2client.contrib.multiprocess_file_storage.logger.setLevel(
          oa2c_multiprocess_file_storage_logger_level)
      # pylint: disable=protected-access
      oauth2client.transport._LOGGER.setLevel(oa2c_logger_level)
      reauth_creds._LOGGER.setLevel(oa2c_logger_level)
      # pylint: enable=protected-access

    # Initialize context configuration for device mTLS.
    context_config.create_context_config(logging.getLogger())

    # TODO(reauth): Fix once reauth pins to pyu2f version newer than 0.1.3.
    # Fixes pyu2f v0.1.3 bug.
    import six  # pylint: disable=g-import-not-at-top
    six.input = six.moves.input

    if not boto_util.CERTIFICATE_VALIDATION_ENABLED:
      sys.stderr.write(HTTP_WARNING)

    if version:
      command_name = 'version'
    elif not args:
      command_name = 'help'
    else:
      command_name = args[0]

    _CheckAndWarnForProxyDifferences()

    # Both 1 and 2 are valid _ARGCOMPLETE values; this var tells argcomplete
    # at what argv[] index the command to match starts. We want it to start at
    # the value for the path to gsutil, so:
    # $ gsutil <command>
    #   Should be the 1st argument, so '1'
    # $ python gsutil <command>
    #   Should be the 2nd argument, so '2'
    # Both are valid; most users invoke gsutil in the first style, but our
    # integration and prerelease tests invoke it in the second style, as we
    # need to specify the Python interpreter used to run gsutil.
    if os.environ.get('_ARGCOMPLETE', '0') in ('1', '2'):
      return _PerformTabCompletion(command_runner)

    return _RunNamedCommandAndHandleExceptions(
        command_runner,
        command_name,
        args=args[1:],
        headers=headers,
        debug_level=debug_level,
        trace_token=trace_token,
        parallel_operations=parallel_operations,
        perf_trace_token=perf_trace_token,
        user_project=user_project)
  finally:
    _Cleanup()