Ejemplo n.º 1
0
    def testGetProject_NoProjectSet(self):
        """GetProject raises MissingProjectError when core/project is unset."""
        # Patch the property getter so core/project reads as unset by the user.
        self.prop_mock = self.StartObjectPatch(
            properties.VALUES.core.project, 'Get')
        self.prop_mock.return_value = None

        with self.assertRaises(exceptions.MissingProjectError) as ex_ctx:
            util.GetProject()
        # The error must both name the problem and tell the user how to fix it.
        message = six.text_type(ex_ctx.exception)
        self.assertIn('No project specified', message)
        self.assertIn('gcloud config set project', message)
Ejemplo n.º 2
0
    def __init__(self, args, context, history_id, gcs_results_root):
        """Initialize a MatrixCreator for building a single test matrix.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        gcloud command invocation (i.e. group and command arguments combined).
      context: {str:obj} dict containing the gcloud command context, which
        includes the Testing API client+messages libs generated by Apitools.
      history_id: {str} A history ID to publish Tool Results to.
      gcs_results_root: the root dir for a matrix within the GCS results bucket.
    """
        # Cache the user-supplied inputs.
        self._args = args
        self._history_id = history_id
        self._gcs_results_root = gcs_results_root
        # Resolve the active project and pull the Testing API libs from the
        # gcloud command context.
        self._project = util.GetProject()
        self._client = context['testing_client']
        self._messages = context['testing_messages']
Ejemplo n.º 3
0
    def __init__(self,
                 matrix_id,
                 test_type,
                 context,
                 clock=datetime.datetime.now,
                 status_interval_secs=None):
        """Initialize a MatrixMonitor for a single test matrix instance.

    Args:
      matrix_id: {str} the unique ID of the matrix being monitored.
      test_type: {str} the type of matrix test being run (e.g. 'robo')
      context: {str:obj} dict containing the gcloud command context, which
        includes the Testing API client & messages libs generated by Apitools.
      clock: injected zero-arg function returning the current datetime when
        called. Used to generate time-stamps on progress messages.
      status_interval_secs: {float} how long to sleep between status checks.
    """
        self.matrix_id = matrix_id
        self._test_type = test_type
        self._client = context['testing_client']
        self._messages = context['testing_messages']
        self._clock = clock
        self._project = util.GetProject()
        self._max_status_length = 0

        if status_interval_secs is None:
            # Fall back to the configured property, then the built-in default.
            interval = (
                properties.VALUES.test.matrix_status_interval.GetInt()
                or _DEFAULT_STATUS_INTERVAL_SECS)
            # Poll half as often when the user is not running interactively
            # (i.e. sys.stdin or sys.stderr is not a terminal i/o stream),
            # e.g. when gcloud is driven by a CI system such as Jenkins.
            # This reduces Testing service load and API quota usage.
            if not console_io.IsInteractive(error=True):
                interval *= 2
            self._status_interval_secs = interval
        else:
            self._status_interval_secs = status_interval_secs

        # Human-readable labels for every test-execution state.
        exec_states = self._messages.TestExecution.StateValueValuesEnum
        self._state_names = {
            exec_states.VALIDATING: 'Validating',
            exec_states.PENDING: 'Pending',
            exec_states.RUNNING: 'Running',
            exec_states.FINISHED: 'Finished',
            exec_states.ERROR: 'Error',
            exec_states.UNSUPPORTED_ENVIRONMENT: 'Unsupported',
            exec_states.INCOMPATIBLE_ENVIRONMENT: 'Incompatible Environment',
            exec_states.INCOMPATIBLE_ARCHITECTURE: 'Incompatible Architecture',
            exec_states.CANCELLED: 'Cancelled',
            exec_states.INVALID: 'Invalid',
            exec_states.TEST_STATE_UNSPECIFIED: '*Unspecified*',
        }
        # States in which a single test execution is considered finished.
        self._completed_execution_states = {
            exec_states.FINISHED,
            exec_states.ERROR,
            exec_states.UNSUPPORTED_ENVIRONMENT,
            exec_states.INCOMPATIBLE_ENVIRONMENT,
            exec_states.INCOMPATIBLE_ARCHITECTURE,
            exec_states.CANCELLED,
            exec_states.INVALID,
        }
        # States in which the whole matrix is considered finished.
        matrix_states = self._messages.TestMatrix.StateValueValuesEnum
        self.completed_matrix_states = {
            matrix_states.FINISHED,
            matrix_states.ERROR,
            matrix_states.CANCELLED,
            matrix_states.INVALID,
        }
Ejemplo n.º 4
0
    def Run(self, args):
        """Run the 'firebase test ios run' command to invoke a test in Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
        # TODO(b/79369595): expand libs to share more code with android run command.
        # In --async mode with no explicit --format, print only the results URL.
        if args.async_ and not args.IsSpecified('format'):
            args.format = """
          value(format('Final test results will be available at [{0}].', []))
      """
        log.status.Print(
            '\nHave questions, feedback, or issues? Get support by '
            'emailing:\n  [email protected]\n')

        # Validate and normalize all arguments before uploading anything.
        arg_manager.IosArgsManager().Prepare(args)

        project = util.GetProject()
        tr_client = self.context['toolresults_client']
        tr_messages = self.context['toolresults_messages']
        storage_client = self.context['storage_client']

        # Upload every test input to the GCS results bucket up front.
        bucket_ops = results_bucket.ResultsBucketOps(project,
                                                     args.results_bucket,
                                                     args.results_dir,
                                                     tr_client, tr_messages,
                                                     storage_client)
        if getattr(args, 'app', None):
            bucket_ops.UploadFileToGcs(args.app, _IPA_MIME_TYPE)
        if args.test:
            bucket_ops.UploadFileToGcs(args.test, 'application/zip')
        if args.xctestrun_file:
            bucket_ops.UploadFileToGcs(args.xctestrun_file, 'text/xml')
        additional_ipas = getattr(args, 'additional_ipas', None) or []
        for additional_ipa in additional_ipas:
            bucket_ops.UploadFileToGcs(additional_ipa, _IPA_MIME_TYPE)
        other_files = getattr(args, 'other_files', {}) or {}
        for device_path, file_to_upload in six.iteritems(other_files):
            path = device_path
            # Only the portion after the first ':' is treated as the device
            # path (presumably a '<prefix>:' qualifier — TODO confirm against
            # the --other-files flag documentation).
            if ':' in path:
                path = path[path.find(':') + 1:]
            bucket_ops.UploadFileToGcs(
                file_to_upload,
                None,
                destination_object=util.GetRelativeDevicePath(path))
        bucket_ops.LogGcsResultsUrl()

        # Pick (or create) the Tool Results history the results publish to.
        tr_history_picker = history_picker.ToolResultsHistoryPicker(
            project, tr_client, tr_messages)
        history_name = PickHistoryName(args)
        history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

        # Create the matrix, then monitor it until done (or detach if async).
        matrix = matrix_creator.CreateMatrix(
            args, self.context, history_id, bucket_ops.gcs_results_root,
            six.text_type(self.ReleaseTrack()))
        monitor = matrix_ops.MatrixMonitor(matrix.testMatrixId, args.type,
                                           self.context)

        # Ctrl-C inside this section cancels the matrix instead of just
        # killing the CLI.
        with ctrl_c_handler.CancellableTestSection(monitor):
            supported_executions = monitor.HandleUnsupportedExecutions(matrix)
            tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

            url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
            log.status.Print('')
            if args.async_:
                return url
            log.status.Print(
                'Test results will be streamed to [{0}].'.format(url))

            # If we have exactly one testExecution, show detailed progress info.
            if len(supported_executions
                   ) == 1 and args.num_flaky_test_attempts == 0:
                monitor.MonitorTestExecutionProgress(
                    supported_executions[0].id)
            else:
                monitor.MonitorTestMatrixProgress()

        log.status.Print('\nMore details are available at [{0}].'.format(url))
        # Fetch the per-dimension test outcomes list, and also the "rolled-up"
        # matrix outcome from the Tool Results service.
        summary_fetcher = results_summary.ToolResultsSummaryFetcher(
            project, tr_client, tr_messages, tr_ids, matrix.testMatrixId)
        # The process exit code reflects the overall matrix outcome.
        self.exit_code = exit_code.ExitCodeFromRollupOutcome(
            summary_fetcher.FetchMatrixRollupOutcome(),
            tr_messages.Outcome.SummaryValueValuesEnum)
        return summary_fetcher.CreateMatrixOutcomeSummaryUsingEnvironments()
Ejemplo n.º 5
0
class _BaseRun(object):
    """Invoke a test in Firebase Test Lab for Android and view test results."""

    detailed_help = {
        'DESCRIPTION':
        """\
          *{command}* invokes and monitors tests in Firebase Test Lab for
          Android.

          Three main types of Android tests are currently supported:
          - *robo*: runs a smart, automated exploration of the activities in
            your Android app which records any installation failures or crashes
            and builds an activity map with associated screenshots and video.
          - *instrumentation*: runs automated unit or integration tests written
            using a testing framework. Firebase Test Lab for Android currently
            supports the Espresso, Robotium and UI Automator 2.0 testing
            frameworks.
          - *game-loop*: executes a special intent built into the game app (a
            "demo mode") that simulates the actions of a real player. This test
            type can include multiple game loops (also called "scenarios"),
            which can be logically organized using scenario labels so that you
            can run related loops together. Refer to
            https://firebase.google.com/docs/test-lab/android/game-loop for
            more information about how to build and run Game Loop tests.

          The type of test to run can be specified with the *--type* flag,
          although the type can often be inferred from other flags.
          Specifically, if the *--test* flag is present, the test *--type*
          defaults to `instrumentation`. If *--test* is not present, then
          *--type* defaults to `robo`.

          All arguments for *{command}* may be specified on the command line
          and/or within an argument file. Run *$ gcloud topic arg-files* for
          more information about argument files.
          """,
        'EXAMPLES':
        """\
          To invoke a robo test lasting 100 seconds against the default device
          environment, run:

            $ {command} --app APP_APK --timeout 100s

          When specifying devices to test against, the preferred method is to
          use the --device flag. For example, to invoke a robo test against a
          virtual, generic MDPI Nexus device in landscape orientation, run:

            $ {command} --app APP_APK --device model=NexusLowRes,orientation=landscape

          To invoke an instrumentation test against a physical Nexus 6 device
          (MODEL_ID: shamu) which is running Android API level 21 in French, run:

            $ {command} --app APP_APK --test TEST_APK --device model=shamu,version=21,locale=fr

          To test against multiple devices, specify --device more than once:

            $ {command} --app APP_APK --test TEST_APK --device model=Nexus4,version=19 --device model=Nexus4,version=21 --device model=NexusLowRes,version=25

          You may also use the legacy dimension flags (deprecated) to specify
          which devices to use. Firebase Test Lab will run tests against every
          possible combination of the listed device dimensions. Note that some
          combinations of device models and OS versions may not be valid or
          available in Test Lab. Any unsupported combinations of dimensions in
          the test matrix will be skipped.

          For example, to execute a series of 5-minute robo tests against a very
          comprehensive matrix of virtual and physical devices, OS versions,
          locales and orientations, run:

            $ {command} --app APP_APK --timeout 5m --device-ids=shamu,NexusLowRes,Nexus5,g3,zeroflte --os-version-ids=19,21,22,23,24,25 --locales=en_GB,es,fr,ru,zh --orientations=portrait,landscape

          The above command will generate a test matrix with a total of 300 test
          executions, but only the subset of executions with valid dimension
          combinations will actually run your tests.

          Controlling Results Storage

          By default, Firebase Test Lab stores detailed test results for a
          limited time in a Google Cloud Storage bucket provided for you at
          no charge. If you wish to use a storage bucket that you control, or
          if you need to retain detailed test results for a longer period,
          use the *--results-bucket* option. See
          https://firebase.google.com/docs/test-lab/analyzing-results#detailed
          for more information.

          Detailed test result files are prefixed by default with a timestamp
          and a random character string. If you require a predictable path
          where detailed test results are stored within the results bucket
          (say, if you have a Continuous Integration system which does custom
          post-processing of test result artifacts), use the *--results-dir*
          option. _Note that each test invocation *must* have a unique storage
          location, so never reuse the same value for *--results-dir* between
          different test runs_. Possible strategies could include using a UUID
          or sequence number for *--results-dir*.

          For example, to run a robo test using a specific Google Cloud Storage
          location to hold the raw test results, run:

            $ {command} --app APP_APK --results-bucket=gs://my-bucket --results-dir=my/test/results/<unique-value>

          To run an instrumentation test and specify a custom name under which
          the history of your tests will be collected and displayed in the
          Firebase console, run:

            $ {command} --app APP_APK --test TEST_APK --results-history-name='Excelsior App Test History'

          Argument Files

          All test arguments for a given test may alternatively be stored in an
          argument group within a YAML-formatted argument file. The _ARG_FILE_
          may contain one or more named argument groups, and argument groups may
          be combined using the `include:` attribute (Run *$ gcloud topic
          arg-files* for more information). The ARG_FILE can easily be shared
          with colleagues or placed under source control to ensure consistent
          test executions.

          To run a test using arguments loaded from an ARG_FILE named
          *excelsior_args*, which contains an argument group named *robo-args:*,
          use the following syntax:

            $ {command} path/to/excelsior_args:robo-args
          """,
    }

    def Run(self, args):
        """Run the 'gcloud firebase test run' command to invoke a test in Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
        # FIX: 'async' is a reserved keyword since Python 3.7, so 'args. async'
        # is a syntax error; use the 'async_' dest (as the other run commands do).
        # In --async mode with no explicit --format, print only the results URL.
        if args.async_ and not args.IsSpecified('format'):
            args.format = """
          value(format(
            'Final test results will be available at [{0}].', [])
          )
      """
        log.status.Print(
            '\nHave questions, feedback, or issues? Get support by '
            'visiting:\n  https://firebase.google.com/support/\n')

        # Validate and normalize all arguments before uploading anything.
        arg_manager.AndroidArgsManager().Prepare(args)

        project = util.GetProject()
        tr_client = self.context['toolresults_client']
        tr_messages = self.context['toolresults_messages']
        storage_client = self.context['storage_client']

        # Upload every test input to the GCS results bucket up front.
        bucket_ops = results_bucket.ResultsBucketOps(project,
                                                     args.results_bucket,
                                                     args.results_dir,
                                                     tr_client, tr_messages,
                                                     storage_client)
        bucket_ops.UploadFileToGcs(args.app)
        if args.test:
            bucket_ops.UploadFileToGcs(args.test)
        for obb_file in (args.obb_files or []):
            bucket_ops.UploadFileToGcs(obb_file)
        if getattr(args, 'robo_script', None):
            bucket_ops.UploadFileToGcs(args.robo_script)
        additional_apks = getattr(args, 'additional_apks', None) or []
        for additional_apk in additional_apks:
            bucket_ops.UploadFileToGcs(additional_apk)
        # FIX: the attribute is 'other_files'; the previous 'other-files' can
        # never exist on an argparse namespace, so these uploads were silently
        # skipped (see CL/249286171).
        for other_file in getattr(args, 'other_files', None) or {}:
            bucket_ops.UploadFileToGcs(other_file)
        bucket_ops.LogGcsResultsUrl()

        # Pick (or create) the Tool Results history the results publish to.
        tr_history_picker = history_picker.ToolResultsHistoryPicker(
            project, tr_client, tr_messages)
        history_name = PickHistoryName(args)
        history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

        # Create the matrix, then monitor it until done (or detach if async).
        matrix = matrix_creator.CreateMatrix(args, self.context, history_id,
                                             bucket_ops.gcs_results_root,
                                             str(self.ReleaseTrack()))
        monitor = matrix_ops.MatrixMonitor(matrix.testMatrixId, args.type,
                                           self.context)

        # Ctrl-C inside this section cancels the matrix rather than just
        # killing the CLI.
        with ctrl_c_handler.CancellableTestSection(monitor):
            supported_executions = monitor.HandleUnsupportedExecutions(matrix)
            tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

            url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
            log.status.Print('')
            if args.async_:
                return url
            log.status.Print(
                'Test results will be streamed to [{0}].'.format(url))

            # If we have exactly one testExecution, show detailed progress info.
            if len(supported_executions) == 1:
                monitor.MonitorTestExecutionProgress(
                    supported_executions[0].id)
            else:
                monitor.MonitorTestMatrixProgress()

        log.status.Print('\nMore details are available at [{0}].'.format(url))
        # Fetch the per-dimension test outcomes list, and also the "rolled-up"
        # matrix outcome from the Tool Results service.
        summary_fetcher = results_summary.ToolResultsSummaryFetcher(
            project, tr_client, tr_messages, tr_ids)
        # The process exit code reflects the overall matrix outcome.
        self.exit_code = exit_code.ExitCodeFromRollupOutcome(
            summary_fetcher.FetchMatrixRollupOutcome(),
            tr_messages.Outcome.SummaryValueValuesEnum)
        return summary_fetcher.CreateMatrixOutcomeSummary()
Ejemplo n.º 6
0
    def Run(self, args):
        """Run the 'gcloud firebase test run' command to invoke a test in Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
        # In --async mode with no explicit --format, print only the results URL.
        if args.async_ and not args.IsSpecified('format'):
            args.format = """
          value(format(
            'Final test results will be available at [{0}].', [])
          )
      """
        log.status.Print(
            '\nHave questions, feedback, or issues? Get support by '
            'visiting:\n  https://firebase.google.com/support/\n')

        # Validate and normalize all arguments before uploading anything.
        arg_manager.AndroidArgsManager().Prepare(args)

        project = util.GetProject()
        tr_client = self.context['toolresults_client']
        tr_messages = self.context['toolresults_messages']
        storage_client = self.context['storage_client']

        # Upload every test input to the GCS results bucket up front.
        bucket_ops = results_bucket.ResultsBucketOps(project,
                                                     args.results_bucket,
                                                     args.results_dir,
                                                     tr_client, tr_messages,
                                                     storage_client)
        bucket_ops.UploadFileToGcs(args.app)
        if args.test:
            bucket_ops.UploadFileToGcs(args.test)
        for obb_file in (args.obb_files or []):
            bucket_ops.UploadFileToGcs(obb_file)
        if getattr(args, 'robo_script', None):
            bucket_ops.UploadFileToGcs(args.robo_script)
        additional_apks = getattr(args, 'additional_apks', None) or []
        for additional_apk in additional_apks:
            bucket_ops.UploadFileToGcs(additional_apk)
        # TODO(b/137674653): add a unit test that would have caught the typo fixed
        #  by CL/249286171.
        # NOTE(review): this iterates the container's elements directly (dict
        # keys, if other_files is a dict) — confirm the flag yields local file
        # paths here, unlike the iOS variant which iterates (path, file) items.
        for other_files in getattr(args, 'other_files', None) or {}:
            bucket_ops.UploadFileToGcs(other_files)
        bucket_ops.LogGcsResultsUrl()

        # Pick (or create) the Tool Results history the results publish to.
        tr_history_picker = history_picker.ToolResultsHistoryPicker(
            project, tr_client, tr_messages)
        history_name = PickHistoryName(args)
        history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

        # Create the matrix, then monitor it until done (or detach if async).
        matrix = matrix_creator.CreateMatrix(
            args, self.context, history_id, bucket_ops.gcs_results_root,
            six.text_type(self.ReleaseTrack()))
        monitor = matrix_ops.MatrixMonitor(matrix.testMatrixId, args.type,
                                           self.context)

        # Ctrl-C inside this section cancels the matrix rather than just
        # killing the CLI.
        with ctrl_c_handler.CancellableTestSection(monitor):
            supported_executions = monitor.HandleUnsupportedExecutions(matrix)
            tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

            url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
            log.status.Print('')
            if args.async_:
                return url
            log.status.Print(
                'Test results will be streamed to [{0}].'.format(url))

            # If we have exactly one testExecution, show detailed progress info.
            if len(supported_executions
                   ) == 1 and args.num_flaky_test_attempts == 0:
                monitor.MonitorTestExecutionProgress(
                    supported_executions[0].id)
            else:
                monitor.MonitorTestMatrixProgress()

        log.status.Print('\nMore details are available at [{0}].'.format(url))
        # Fetch the per-dimension test outcomes list, and also the "rolled-up"
        # matrix outcome from the Tool Results service.
        summary_fetcher = results_summary.ToolResultsSummaryFetcher(
            project, tr_client, tr_messages, tr_ids)
        # The process exit code reflects the overall matrix outcome.
        self.exit_code = exit_code.ExitCodeFromRollupOutcome(
            summary_fetcher.FetchMatrixRollupOutcome(),
            tr_messages.Outcome.SummaryValueValuesEnum)
        # Flaky-attempt runs get a dedicated outcome format and summary.
        if args.num_flaky_test_attempts > 0:
            if not args.IsSpecified('format'):
                args.format = util.FLAKY_ATTEMPTS_OUTCOMES_FORMAT
            return summary_fetcher.CreateFlakyAttemptsMatrixOutcomeSummary()
        else:
            return summary_fetcher.CreateMatrixOutcomeSummary()
Ejemplo n.º 7
0
class Run(base.ListCommand):
  """Invoke a test in Firebase Test Lab for iOS and view test results."""

  detailed_help = {
      'DESCRIPTION':
          """\
          *{command}* invokes and monitors tests in Firebase Test Lab for iOS.

          The currently supported iOS test frameworks are XCTest and XCUITest.
          Other iOS testing frameworks which are built upon XCTest and XCUITest
          should also work.

          The XCTEST_ZIP test package is a zip file built using Apple's Xcode
          and supporting tools. For a detailed description of the process to
          create your XCTEST_ZIP file, see
          https://firebase.google.com/docs/test-lab/ios/command-line.

          All arguments for *{command}* may be specified on the command line
          and/or within an argument file. Run *$ gcloud topic arg-files* for
          more information about argument files.
          """,
      'EXAMPLES':
          """\
          To invoke an XCTest lasting up to five minutes against the default
          device environment, run:

            $ {command} --test XCTEST_ZIP --timeout 5m

          To invoke an XCTest against an iPad 5 running iOS 11.2, run:

            $ {command} --test XCTEST_ZIP --device model=ipad5,version=11.2

          To run your tests against multiple iOS devices simultaneously, specify
          the *--device* flag more than once:

            $ {command} --test XCTEST_ZIP \
              --device model=iphone7 \
              --device model=ipadmini4,version=11.2 \
              --device model=iphonese

          To run your XCTest using a specific version of Xcode, say 9.4.1, run:

            $ {command} --test XCTEST_ZIP --xcode-version=9.4.1

          All test arguments for a given test may alternatively be stored in an
          argument group within a YAML-formatted argument file. The _ARG_FILE_
          may contain one or more named argument groups, and argument groups may
          be combined using the `include:` attribute (Run *$ gcloud topic
          arg-files* for more information). The ARG_FILE can easily be shared
          with colleagues or placed under source control to ensure consistent
          test executions.

          To run a test using arguments loaded from an ARG_FILE named
          *excelsior_app_args*, which contains an argument group named
          *ios-args:*, use the following syntax:

            $ {command} path/to/excelsior_app_args:ios-args
          """,
  }

  @staticmethod
  def Args(parser):
    """Method called by Calliope to register flags for this command.

    Args:
      parser: An argparse parser used to add arguments that follow this
          command in the CLI. Positional arguments are allowed.
    """
    arg_util.AddCommonTestRunArgs(parser)
    arg_util.AddIosTestArgs(parser)
    base.URI_FLAG.RemoveFromParser(parser)
    parser.display_info.AddFormat(util.OUTCOMES_FORMAT)

  def Run(self, args):
    """Run the 'firebase test ios run' command to invoke a test in Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
    # TODO(b/79369595): expand libs to share more code with android run command.
    # FIX: 'async' is a reserved keyword since Python 3.7, so 'args.async' is a
    # syntax error; use the 'async_' dest (as the other run commands do).
    # In --async mode with no explicit --format, print only the results URL.
    if args.async_ and not args.IsSpecified('format'):
      args.format = """
          value(format('Final test results will be available at [{0}].', []))
      """
    log.status.Print('\nHave questions, feedback, or issues? Get support by '
                     'emailing:\n  [email protected]\n')

    # Validate and normalize all arguments before uploading anything.
    arg_manager.IosArgsManager().Prepare(args)

    project = util.GetProject()
    tr_client = self.context['toolresults_client']
    tr_messages = self.context['toolresults_messages']
    storage_client = self.context['storage_client']

    # Upload every test input to the GCS results bucket up front.
    bucket_ops = results_bucket.ResultsBucketOps(project, args.results_bucket,
                                                 args.results_dir, tr_client,
                                                 tr_messages, storage_client)
    bucket_ops.UploadFileToGcs(args.test)
    if args.xctestrun_file:
      bucket_ops.UploadFileToGcs(args.xctestrun_file)
    bucket_ops.LogGcsResultsUrl()

    # Pick (or create) the Tool Results history the results publish to.
    tr_history_picker = history_picker.ToolResultsHistoryPicker(
        project, tr_client, tr_messages)
    history_name = PickHistoryName(args)
    history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

    # Create the matrix, then monitor it until done (or detach if async).
    matrix = matrix_creator.CreateMatrix(args, self.context, history_id,
                                         bucket_ops.gcs_results_root,
                                         str(self.ReleaseTrack()))
    monitor = matrix_ops.MatrixMonitor(
        matrix.testMatrixId, args.type, self.context)

    # Ctrl-C inside this section cancels the matrix rather than just killing
    # the CLI.
    with ctrl_c_handler.CancellableTestSection(monitor):
      supported_executions = monitor.HandleUnsupportedExecutions(matrix)
      tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

      url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
      log.status.Print('')
      if args.async_:
        return url
      log.status.Print('Test results will be streamed to [{0}].'.format(url))

      # If we have exactly one testExecution, show detailed progress info.
      if len(supported_executions) == 1 and args.num_flaky_test_attempts == 0:
        monitor.MonitorTestExecutionProgress(supported_executions[0].id)
      else:
        monitor.MonitorTestMatrixProgress()

    log.status.Print('\nMore details are available at [{0}].'.format(url))
    # Fetch the per-dimension test outcomes list, and also the "rolled-up"
    # matrix outcome from the Tool Results service.
    summary_fetcher = results_summary.ToolResultsSummaryFetcher(
        project, tr_client, tr_messages, tr_ids)
    # The process exit code reflects the overall matrix outcome.
    self.exit_code = exit_code.ExitCodeFromRollupOutcome(
        summary_fetcher.FetchMatrixRollupOutcome(),
        tr_messages.Outcome.SummaryValueValuesEnum)
    # Flaky-attempt runs get a dedicated outcome format and summary.
    if args.num_flaky_test_attempts > 0:
      if not args.IsSpecified('format'):
        args.format = util.FLAKY_ATTEMPTS_OUTCOMES_FORMAT
      return summary_fetcher.CreateFlakyAttemptsMatrixOutcomeSummary()
    else:
      return summary_fetcher.CreateMatrixOutcomeSummary()
Ejemplo n.º 8
0
 def testGetProject_ProjectSet(self):
     """GetProject returns the configured core/project value."""
     properties.VALUES.core.project.Set(self.PROJECT_ID)
     resolved = util.GetProject()
     self.assertEqual(resolved, self.PROJECT_ID)
Ejemplo n.º 9
0
    def Run(self, args):
        """Run the 'gcloud firebase test run' command to invoke a test in Test Lab.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation (i.e. group and command arguments combined).

    Returns:
      One of:
        - a list of TestOutcome tuples (if ToolResults are available).
        - a URL string pointing to the user's results in ToolResults or GCS.
    """
        # Validate and normalize all arguments before uploading anything.
        arg_manager.AndroidArgsManager().Prepare(args)

        project = util.GetProject()
        tr_client = self.context['toolresults_client']
        tr_messages = self.context['toolresults_messages']
        storage_client = self.context['storage_client']

        # Upload every test input to the GCS results bucket up front.
        bucket_ops = results_bucket.ResultsBucketOps(project,
                                                     args.results_bucket,
                                                     args.results_dir,
                                                     tr_client, tr_messages,
                                                     storage_client)
        bucket_ops.UploadFileToGcs(args.app)
        if args.test:
            bucket_ops.UploadFileToGcs(args.test)
        for obb_file in (args.obb_files or []):
            bucket_ops.UploadFileToGcs(obb_file)
        bucket_ops.LogGcsResultsUrl()

        # Pick (or create) the Tool Results history the results publish to.
        tr_history_picker = history_picker.ToolResultsHistoryPicker(
            project, tr_client, tr_messages)
        history_name = PickHistoryName(args)
        history_id = tr_history_picker.GetToolResultsHistoryId(history_name)

        # Create the matrix, then monitor it until done (or detach if async).
        matrix = matrix_creator.CreateMatrix(args, self.context, history_id,
                                             bucket_ops.gcs_results_root)
        monitor = matrix_ops.MatrixMonitor(matrix.testMatrixId, args.type,
                                           self.context)

        # Ctrl-C inside this section cancels the matrix rather than just
        # killing the CLI.
        with ctrl_c_handler.CancellableTestSection(monitor):
            supported_executions = monitor.HandleUnsupportedExecutions(matrix)
            tr_ids = tool_results.GetToolResultsIds(matrix, monitor)

            url = tool_results.CreateToolResultsUiUrl(project, tr_ids)
            log.status.Print('')
            # FIX: 'async' is a reserved keyword since Python 3.7, so
            # 'args. async' is a syntax error; use the 'async_' dest (as the
            # other run commands do).
            if args.async_:
                return url
            log.status.Print(
                'Test results will be streamed to [{0}].'.format(url))

            # If we have exactly one testExecution, show detailed progress info.
            if len(supported_executions) == 1:
                monitor.MonitorTestExecutionProgress(
                    supported_executions[0].id)
            else:
                monitor.MonitorTestMatrixProgress()

        log.status.Print('\nMore details are available at [{0}].'.format(url))
        # Fetch the per-dimension test outcomes list, and also the "rolled-up"
        # matrix outcome from the Tool Results service.
        summary_fetcher = results_summary.ToolResultsSummaryFetcher(
            project, tr_client, tr_messages, tr_ids)
        # The process exit code reflects the overall matrix outcome.
        self.exit_code = exit_code.ExitCodeFromRollupOutcome(
            summary_fetcher.FetchMatrixRollupOutcome(),
            tr_messages.Outcome.SummaryValueValuesEnum)
        return summary_fetcher.CreateMatrixOutcomeSummary()