Example #1
0
def main(args=sys.argv[1:]):
    """Command-line entry point: run a single eideticker test.

    Parses options, optionally prepares the test, runs it, and saves any
    requested HTTP-request / actions logs via testlog.save_logs().

    :param args: argument vector to parse (defaults to sys.argv[1:])
    """
    usage = "usage: %prog [options] <test key>"
    parser = eideticker.TestOptionParser(usage=usage)
    parser.add_option("--url-params", action="store",
                      dest="url_params",
                      help="additional url parameters for test")
    parser.add_option("--name", action="store",
                      type="string", dest="capture_name",
                      help="name to give capture")
    parser.add_option("--capture-file", action="store",
                      type="string", dest="capture_file",
                      help="name to give to capture file")
    parser.add_option("--no-capture", action="store_true",
                      dest="no_capture",
                      help="run through the test, but don't actually "
                      "capture anything")
    parser.add_option("--app-name", action="store",
                      type="string", dest="appname",
                      help="Specify an application name (android only)")
    parser.add_option("--test-type", action="store", type="string",
                      dest="test_type", help="override test type")
    parser.add_option("--profile-file", action="store",
                      type="string", dest="profile_file",
                      help="Collect a performance profile using the built in "
                      "profiler (fennec only).")
    parser.add_option("--request-log-file", action="store",
                      type="string", dest="request_log_file",
                      help="Collect a log of HTTP requests during test")
    parser.add_option("--actions-log-file", action="store",
                      type="string", dest="actions_log_file",
                      help="Collect a log of actions requests during test")

    # Fix: pass the function's `args` through to the parser. Previously
    # parse_args() was called with no arguments, so the `args` parameter
    # to main() was silently ignored and sys.argv was always used.
    options, args = parser.parse_args(args)

    if len(args) != 1:
        # parser.error() prints the message and exits the process itself,
        # so the explicit sys.exit(1) that used to follow was unreachable.
        parser.error("You must specify (only) a test key")
    testkey = args[0]

    device_prefs = eideticker.getDevicePrefs(options)

    if options.prepare_test:
        eideticker.prepare_test(testkey, device_prefs)

    testlog = eideticker.run_test(testkey, options.capture_device,
                                  options.appname,
                                  options.capture_name, device_prefs,
                                  extra_prefs=options.extra_prefs,
                                  extra_env_vars=options.extra_env_vars,
                                  test_type=options.test_type,
                                  profile_file=options.profile_file,
                                  no_capture=options.no_capture,
                                  capture_area=options.capture_area,
                                  capture_file=options.capture_file,
                                  wifi_settings_file=options.wifi_settings_file,
                                  sync_time=options.sync_time)

    # save logs if applicable
    testlog.save_logs(http_request_log_path=options.request_log_file,
                      actions_log_path=options.actions_log_file)
Example #2
0
def runtest(dm, device_prefs, options, product, appinfo, testinfo,
            capture_name):
    capture_filename = os.path.join(CAPTURE_DIR,
                                    "%s-%s-%s-%s.zip" % (testinfo['key'],
                                                         options.appname,
                                                         appinfo.get('appdate'),
                                                         int(time.time())))
    productname = product['name']

    if options.enable_profiling:
        productname += "-profiling"
        profile_path = os.path.join(
            'profiles', 'sps-profile-%s.zip' % time.time())
        profile_filename = os.path.join(options.dashboard_dir, profile_path)
    else:
        profile_filename = None

    test_completed = False
    for i in range(3):
        print "Running test %s (try %s of 3)" % (testinfo['key'], (i + 1))

        try:
            testlog = eideticker.run_test(
                testinfo['key'], options, capture_filename=capture_filename,
                profile_filename=profile_filename, capture_name=capture_name)
            test_completed = True
            break
        except eideticker.TestException, e:
            if e.can_retry:
                print "Test failed, but not fatally. Retrying..."
                print e
            else:
                print "Test failed (fatally). Aborting"
                print e
                raise
Example #3
0
def runtest(dm,
            device_prefs,
            capture_device,
            capture_area,
            product,
            appname,
            appinfo,
            testinfo,
            capture_name,
            outputdir,
            datafile,
            data,
            enable_profiling=False,
            log_http_requests=False,
            log_actions=False,
            baseline=False,
            wifi_settings_file=None,
            sync_time=True):
    capture_file = os.path.join(
        CAPTURE_DIR, "%s-%s-%s-%s.zip" %
        (testinfo['key'], appname, appinfo.get('appdate'), int(time.time())))
    productname = product['name']

    profile_file = None
    if enable_profiling:
        productname += "-profiling"
        profile_path = os.path.join('profiles',
                                    'sps-profile-%s.zip' % time.time())
        profile_file = os.path.join(outputdir, profile_path)

    test_completed = False
    for i in range(3):
        print "Running test (try %s of 3)" % (i + 1)

        # Kill any existing instances of the processes before starting
        dm.killProcess(appname)

        try:
            testlog = eideticker.run_test(
                testinfo['key'],
                capture_device,
                appname,
                capture_name,
                device_prefs,
                profile_file=profile_file,
                capture_area=capture_area,
                capture_file=capture_file,
                wifi_settings_file=wifi_settings_file,
                sync_time=sync_time)
            test_completed = True
            break
        except eideticker.TestException, e:
            if e.can_retry:
                print "Test failed, but not fatally. Retrying..."
            else:
                raise
Example #4
0
def runtest(dm, device_prefs, capture_device, capture_area, product, appname,
            appinfo, testinfo, capture_name, outputdir, datafile, data,
            enable_profiling=False, log_http_requests=False, baseline=False):
    capture_file = os.path.join(CAPTURE_DIR,
                                "%s-%s-%s-%s.zip" % (testinfo['key'],
                                                     appname,
                                                     appinfo.get('appdate'),
                                                     int(time.time())))
    productname = product['name']

    profile_file = None
    if enable_profiling:
        productname += "-profiling"
        profile_path = os.path.join('profiles', 'sps-profile-%s.zip' % time.time())
        profile_file = os.path.join(outputdir, profile_path)

    request_log_file = None
    if log_http_requests:
        request_log_path = os.path.join('httplogs', 'http-log-%s.json' % time.time())
        request_log_file = os.path.join(outputdir, request_log_path)

    test_completed = False
    for i in range(3):
        print "Running test (try %s of 3)" % (i+1)

        # Kill any existing instances of the processes before starting
        dm.killProcess(appname)

        try:
            eideticker.run_test(testinfo['key'], capture_device,
                                appname, capture_name, device_prefs,
                                profile_file=profile_file,
                                request_log_file=request_log_file,
                                capture_area=capture_area,
                                capture_file=capture_file)
            test_completed = True
            break
        except eideticker.TestException, e:
            if e.can_retry:
                print "Test failed, but not fatally. Retrying..."
            else:
                raise
Example #5
0
def main(args=sys.argv[1:]):
    """Command-line entry point: run a single eideticker test and save
    any requested HTTP-request / actions logs as JSON.

    :param args: argument vector to parse (defaults to sys.argv[1:])
    """
    usage = "usage: %prog [options] <test key>"
    parser = eideticker.TestOptionParser(usage=usage)
    parser.add_option("--url-params", action="store",
                      dest="url_params",
                      help="additional url parameters for test")
    parser.add_option("--name", action="store",
                      type="string", dest="capture_name",
                      help="name to give capture")
    parser.add_option("--capture-file", action="store",
                      type="string", dest="capture_file",
                      help="name to give to capture file")
    parser.add_option("--app-name", action="store",
                      type="string", dest="appname",
                      help="Specify an application name (android only)")
    parser.add_option("--test-type", action="store", type="string",
                      dest="test_type", help="override test type")
    parser.add_option("--profile-file", action="store",
                      type="string", dest="profile_file",
                      help="Collect a performance profile using the built in "
                      "profiler (fennec only).")
    parser.add_option("--request-log-file", action="store",
                      type="string", dest="request_log_file",
                      help="Collect a log of HTTP requests during test")
    parser.add_option("--actions-log-file", action="store",
                      type="string", dest="actions_log_file",
                      help="Collect a log of actions requests during test")

    # Fix: actually parse the `args` passed to main(); parse_args() with
    # no arguments always read sys.argv, silently ignoring the parameter.
    options, args = parser.parse_args(args)

    if len(args) != 1:
        # parser.error() prints the message and exits the process itself,
        # so the explicit sys.exit(1) that used to follow was unreachable.
        parser.error("You must specify (only) a test key")
    testkey = args[0]

    if options.prepare_test:
        eideticker.prepare_test(testkey, options)

    testlog = eideticker.run_test(testkey, options,
                                  capture_filename=options.capture_file,
                                  profile_filename=options.profile_file)

    # save logs if applicable (fix: close the files via `with` instead of
    # leaking the handles from open(...).write(...))
    if options.request_log_file:
        with open(options.request_log_file, 'w') as f:
            f.write(json.dumps(testlog.http_request_log))
    if options.actions_log_file:
        with open(options.actions_log_file, 'w') as f:
            f.write(json.dumps(testlog.actions))
Example #6
0
def runtest(dm, device_prefs, options, product, appname,
            appinfo, testinfo, capture_name, datafile, data,
            log_http_requests=False, log_actions=False):
    capture_file = os.path.join(CAPTURE_DIR,
                                "%s-%s-%s-%s.zip" % (testinfo['key'],
                                                     appname,
                                                     appinfo.get('appdate'),
                                                     int(time.time())))
    productname = product['name']

    profile_file = None
    if options.enable_profiling:
        productname += "-profiling"
        profile_path = os.path.join(
            'profiles', 'sps-profile-%s.zip' % time.time())
        profile_file = os.path.join(options.outputdir, profile_path)

    test_completed = False
    for i in range(3):
        print "Running test (try %s of 3)" % (i + 1)

        # Kill any existing instances of the processes before starting
        dm.killProcess(appname)

        try:
            testlog = eideticker.run_test(
                testinfo['key'], options.capture_device,
                appname, capture_name, device_prefs,
                profile_file=profile_file,
                capture_area=options.capture_area,
                camera_settings_file=options.camera_settings_file,
                capture=options.capture,
                capture_file=capture_file,
                wifi_settings_file=options.wifi_settings_file,
                sync_time=options.sync_time,
                use_vpxenc=options.use_vpxenc)
            test_completed = True
            break
        except eideticker.TestException, e:
            if e.can_retry:
                print "Test failed, but not fatally. Retrying..."
            else:
                raise
Example #7
0
def runtest(dm, device_prefs, options, product, appinfo, testinfo,
            capture_name):
    capture_filename = os.path.join(
        CAPTURE_DIR,
        "%s-%s-%s-%s.zip" % (testinfo['key'], options.appname,
                             appinfo.get('appdate'), int(time.time())))
    productname = product['name']
    test_uuid = uuid.uuid1().hex

    if options.enable_profiling:
        productname += "-profiling"
        profile_filename = os.path.join(options.dashboard_dir, 'profiles',
                                        '%s.zip' % test_uuid)
    else:
        profile_filename = None

    test_completed = False
    for i in range(3):
        print "Running test %s (try %s of 3)" % (testinfo['key'], (i + 1))

        try:
            testlog = eideticker.run_test(testinfo['key'],
                                          options,
                                          capture_filename=capture_filename,
                                          profile_filename=profile_filename,
                                          capture_name=capture_name)
            test_completed = True
            break
        except eideticker.TestException, e:
            if e.can_retry:
                print "Test failed, but not fatally. Retrying..."
                print e
            else:
                print "Test failed (fatally). Aborting"
                print e
                raise
Example #8
0
def main(args=sys.argv[1:]):
    """Command-line entry point: run a single eideticker test.

    Parses options, optionally prepares the test, runs it, and saves any
    requested HTTP-request / actions logs via testlog.save_logs().

    :param args: argument vector to parse (defaults to sys.argv[1:])
    """
    usage = "usage: %prog [options] <test key>"
    parser = eideticker.TestOptionParser(usage=usage)
    parser.add_option("--url-params",
                      action="store",
                      dest="url_params",
                      help="additional url parameters for test")
    parser.add_option("--name",
                      action="store",
                      type="string",
                      dest="capture_name",
                      help="name to give capture")
    parser.add_option("--capture-file",
                      action="store",
                      type="string",
                      dest="capture_file",
                      help="name to give to capture file")
    parser.add_option("--no-capture",
                      action="store_true",
                      dest="no_capture",
                      help="run through the test, but don't actually "
                      "capture anything")
    parser.add_option("--app-name",
                      action="store",
                      type="string",
                      dest="appname",
                      help="Specify an application name (android only)")
    parser.add_option("--test-type",
                      action="store",
                      type="string",
                      dest="test_type",
                      help="override test type")
    parser.add_option("--profile-file",
                      action="store",
                      type="string",
                      dest="profile_file",
                      help="Collect a performance profile using the built in "
                      "profiler (fennec only).")
    parser.add_option("--request-log-file",
                      action="store",
                      type="string",
                      dest="request_log_file",
                      help="Collect a log of HTTP requests during test")
    parser.add_option("--actions-log-file",
                      action="store",
                      type="string",
                      dest="actions_log_file",
                      help="Collect a log of actions requests during test")

    # Fix: pass the function's `args` through to the parser. Previously
    # parse_args() was called with no arguments, so the `args` parameter
    # to main() was silently ignored and sys.argv was always used.
    options, args = parser.parse_args(args)

    if len(args) != 1:
        # parser.error() prints the message and exits the process itself,
        # so the explicit sys.exit(1) that used to follow was unreachable.
        parser.error("You must specify (only) a test key")
    testkey = args[0]

    device_prefs = eideticker.getDevicePrefs(options)

    if options.prepare_test:
        eideticker.prepare_test(testkey, device_prefs)

    testlog = eideticker.run_test(
        testkey,
        options.capture_device,
        options.appname,
        options.capture_name,
        device_prefs,
        extra_prefs=options.extra_prefs,
        extra_env_vars=options.extra_env_vars,
        test_type=options.test_type,
        profile_file=options.profile_file,
        no_capture=options.no_capture,
        capture_area=options.capture_area,
        fps=options.fps,
        capture_file=options.capture_file,
        wifi_settings_file=options.wifi_settings_file,
        sync_time=options.sync_time)

    # save logs if applicable
    testlog.save_logs(http_request_log_path=options.request_log_file,
                      actions_log_path=options.actions_log_file)
def runtest(device_prefs, testname, options, apk=None, appname=None,
            appdate=None):
    if apk:
        appinfo = eideticker.get_fennec_appinfo(apk)
        appname = appinfo['appname']
        print "Installing %s (version: %s, revision %s)" % (
            appinfo['appname'], appinfo['version'], appinfo['revision'])
        device = eideticker.getDevice(**device_prefs)
        device.updateApp(apk)
    else:
        appinfo = None

    options.appname = appname

    testinfo = eideticker.get_testinfo(testname)
    stableframecapture = (testinfo['type'] in ('startup', 'webstartup') or
                          testinfo['defaultMeasure'] == 'timetostableframe')

    capture_results = []

    if options.prepare_test:
        eideticker.prepare_test(testname, options)

    for i in range(options.num_runs):
        # Now run the test
        curtime = int(time.time())
        capture_file = os.path.join(CAPTURE_DIR,
                                    "metric-test-%s-%s.zip" % (appname,
                                                               curtime))
        if options.enable_profiling and options.outputdir:
            profile_relpath = os.path.join(
                'profiles', 'sps-profile-%s.zip' % time.time())
            profile_filename = os.path.join(options.outputdir, profile_relpath)
        else:
            profile_filename = None

        current_date = time.strftime("%Y-%m-%d")
        capture_name = "%s - %s (taken on %s)" % (
            testname, appname, current_date)

        testlog = eideticker.run_test(testname, options,
                                      capture_filename=capture_file,
                                      profile_filename=profile_filename,
                                      capture_name=capture_name)

        capture_uuid = uuid.uuid1().hex
        datapoint = { 'uuid': capture_uuid }
        metadata = {}
        metrics = {}

        if options.capture:
            capture = videocapture.Capture(capture_file)

            datapoint['captureFile'] = metadata['captureFile'] = capture_file
            metadata['captureFPS'] = capture.fps
            metadata['generatedVideoFPS'] = capture.generated_video_fps

            if stableframecapture:
                metrics['timetostableframe'] = \
                    eideticker.get_stable_frame_time(capture)
            else:
                metrics.update(
                    eideticker.get_standard_metrics(capture, testlog.actions))
            metadata['metrics'] = metrics

            metadata.update(eideticker.get_standard_metric_metadata(capture))

            if options.outputdir:
                # video
                video_relpath = os.path.join(
                    'videos', 'video-%s.webm' % time.time())
                video_path = os.path.join(options.outputdir, video_relpath)
                open(video_path, 'w').write(capture.get_video().read())
                metadata['video'] = video_relpath

        if options.log_checkerboard_stats:
            metrics['internalcheckerboard'] = \
                testlog.checkerboard_percent_totals

        # Want metrics data in data, so we can graph everything at once
        datapoint.update(metrics)

        if options.enable_profiling:
            metadata['profile'] = profile_filename

        # dump metadata
        if options.outputdir:
            # metadata
            metadata_path = os.path.join(options.outputdir, 'metadata',
                                         '%s.json' % capture_uuid)
            open(metadata_path, 'w').write(json.dumps(metadata))

        capture_results.append(datapoint)

    if options.devicetype == "b2g":
        # FIXME: get information from sources.xml and application.ini on
        # device, as we do in update-dashboard.py
        display_key = appkey = "FirefoxOS"
    else:
        appkey = appname
        if appdate:
            appkey = appdate.isoformat()
        else:
            appkey = appname

        if appinfo and appinfo.get('revision'):
            display_key = "%s (%s)" % (appkey, appinfo['revision'])
        else:
            display_key = appkey

    print "=== Results on %s for %s ===" % (testname, display_key)

    if options.capture:
        measures = [ ('timetostableframe',
                      'Times to first stable frame (seconds)'),
                     ('uniqueframes', 'Number of unique frames'),
                     ('fps', 'Average number of unique frames per second'),
                     ('overallentropy',
                      'Overall entropy over length of capture'),
                     ('checkerboard',
                      'Checkerboard area/duration (sum of percents NOT '
                      'percentage)'),
                     ('timetoresponse',
                      'Time to first input response (seconds)') ]
        for measure in measures:
            if capture_results[0].get(measure[0]):
                print "  %s:" % measure[1]
                print "  %s" % map(lambda c: c[measure[0]], capture_results)
                print

        print "  Capture files:"
        print "  Capture files: %s" % map(lambda c: c['captureFile'], capture_results)
        print

    if options.log_checkerboard_stats:
        print "  Internal Checkerboard Stats (sum of percents, not "
        "percentage):"
        print "  %s" % map(
            lambda c: c['internalcheckerboard'], capture_results)
        print

    if options.outputdir:
        outputfile = os.path.join(options.outputdir, "metric.json")
        resultdict = {'title': testname, 'data': {}}
        if os.path.isfile(outputfile):
            resultdict.update(json.loads(open(outputfile).read()))

        if not resultdict['data'].get(appkey):
            resultdict['data'][appkey] = []
        resultdict['data'][appkey].extend(capture_results)

        with open(outputfile, 'w') as f:
            f.write(json.dumps(resultdict))
def runtest(device_prefs,
            testname,
            options,
            apk=None,
            appname=None,
            appdate=None):
    if apk:
        appinfo = eideticker.get_fennec_appinfo(apk)
        appname = appinfo['appname']
        print "Installing %s (version: %s, revision %s)" % (
            appinfo['appname'], appinfo['version'], appinfo['revision'])
        device = eideticker.getDevice(**device_prefs)
        device.updateApp(apk)
    else:
        appinfo = None

    testinfo = eideticker.get_testinfo(testname)
    stableframecapture = (testinfo['type'] in ('startup', 'webstartup')
                          or testinfo['defaultMeasure'] == 'timetostableframe')

    capture_results = []

    if options.prepare_test:
        eideticker.prepare_test(testname, device_prefs,
                                options.wifi_settings_file)

    for i in range(options.num_runs):
        # Now run the test
        curtime = int(time.time())
        capture_file = os.path.join(
            CAPTURE_DIR, "metric-test-%s-%s.zip" % (appname, curtime))
        if options.enable_profiling and options.outputdir:
            profile_relpath = os.path.join('profiles',
                                           'sps-profile-%s.zip' % time.time())
            profile_file = os.path.join(options.outputdir, profile_relpath)
        else:
            profile_file = None

        current_date = time.strftime("%Y-%m-%d")
        capture_name = "%s - %s (taken on %s)" % (testname, appname,
                                                  current_date)

        testlog = eideticker.run_test(
            testname,
            options.capture_device,
            appname,
            capture_name,
            device_prefs,
            extra_prefs=options.extra_prefs,
            extra_env_vars=options.extra_env_vars,
            log_checkerboard_stats=options.get_internal_checkerboard_stats,
            profile_file=profile_file,
            capture_area=options.capture_area,
            camera_settings_file=options.camera_settings_file,
            capture=options.capture,
            fps=options.fps,
            capture_file=capture_file,
            wifi_settings_file=options.wifi_settings_file,
            sync_time=options.sync_time,
            use_vpxenc=options.use_vpxenc)

        capture_uuid = uuid.uuid1().hex
        datapoint = {'uuid': capture_uuid}
        metadata = {}
        metrics = {}

        if options.capture:
            capture = videocapture.Capture(capture_file)

            datapoint['captureFile'] = metadata['captureFile'] = capture_file
            metadata['captureFPS'] = capture.fps
            metadata['generatedVideoFPS'] = capture.generated_video_fps

            if stableframecapture:
                metrics['timetostableframe'] = \
                    eideticker.get_stable_frame_time(capture)
            else:
                metrics.update(
                    eideticker.get_standard_metrics(capture, testlog.actions))
            metadata['metrics'] = metrics

            metadata.update(eideticker.get_standard_metric_metadata(capture))

            if options.outputdir:
                # video
                video_relpath = os.path.join('videos',
                                             'video-%s.webm' % time.time())
                video_path = os.path.join(options.outputdir, video_relpath)
                open(video_path, 'w').write(capture.get_video().read())
                metadata['video'] = video_relpath

        if options.get_internal_checkerboard_stats:
            metrics['internalcheckerboard'] = \
                testlog.checkerboard_percent_totals

        # Want metrics data in data, so we can graph everything at once
        datapoint.update(metrics)

        if options.enable_profiling:
            metadata['profile'] = profile_file

        # dump metadata
        if options.outputdir:
            # metadata
            metadata_path = os.path.join(options.outputdir, 'metadata',
                                         '%s.json' % capture_uuid)
            open(metadata_path, 'w').write(json.dumps(metadata))

        capture_results.append(datapoint)

    if options.devicetype == "b2g":
        # FIXME: get information from sources.xml and application.ini on
        # device, as we do in update-dashboard.py
        display_key = appkey = "FirefoxOS"
    else:
        appkey = appname
        if appdate:
            appkey = appdate.isoformat()
        else:
            appkey = appname

        if appinfo and appinfo.get('revision'):
            display_key = "%s (%s)" % (appkey, appinfo['revision'])
        else:
            display_key = appkey

    print "=== Results on %s for %s ===" % (testname, display_key)

    if options.capture:
        measures = [
            ('timetostableframe', 'Times to first stable frame (seconds)'),
            ('uniqueframes', 'Number of unique frames'),
            ('fps', 'Average number of unique frames per second'),
            ('overallentropy', 'Overall entropy over length of capture'),
            ('checkerboard', 'Checkerboard area/duration (sum of percents NOT '
             'percentage)'),
            ('timetoresponse', 'Time to first input response (seconds)')
        ]
        for measure in measures:
            if capture_results[0].get(measure[0]):
                print "  %s:" % measure[1]
                print "  %s" % map(lambda c: c[measure[0]], capture_results)
                print

        print "  Capture files:"
        print "  Capture files: %s" % map(lambda c: c['captureFile'],
                                          capture_results)
        print

    if options.get_internal_checkerboard_stats:
        print "  Internal Checkerboard Stats (sum of percents, not "
        "percentage):"
        print "  %s" % map(lambda c: c['internalcheckerboard'],
                           capture_results)
        print

    if options.outputdir:
        outputfile = os.path.join(options.outputdir, "metric.json")
        resultdict = {'title': testname, 'data': {}}
        if os.path.isfile(outputfile):
            resultdict.update(json.loads(open(outputfile).read()))

        if not resultdict['data'].get(appkey):
            resultdict['data'][appkey] = []
        resultdict['data'][appkey].extend(capture_results)

        with open(outputfile, 'w') as f:
            f.write(json.dumps(resultdict))
def runtest(device_prefs, testname, options, apk=None, appname=None, appdate=None):
    device = None
    if apk:
        appinfo = eideticker.get_fennec_appinfo(apk)
        appname = appinfo["appname"]
        print "Installing %s (version: %s, revision %s)" % (appinfo["appname"], appinfo["version"], appinfo["revision"])
        device = eideticker.getDevice(**device_prefs)
        device.updateApp(apk)
    else:
        appinfo = None

    testinfo = eideticker.get_testinfo(testname)
    stableframecapture = (
        testinfo["type"] in ("startup", "webstartup") or testinfo["defaultMeasure"] == "timetostableframe"
    )

    capture_results = []

    for i in range(options.num_runs):
        # Kill any existing instances of the processes (for Android)
        if device:
            device.killProcess(appname)

        # Now run the test
        curtime = int(time.time())
        capture_file = os.path.join(CAPTURE_DIR, "metric-test-%s-%s.zip" % (appname, curtime))
        if options.enable_profiling:
            profile_file = os.path.join(PROFILE_DIR, "profile-%s-%s.zip" % (appname, curtime))
        else:
            profile_file = None

        current_date = time.strftime("%Y-%m-%d")
        capture_name = "%s - %s (taken on %s)" % (testname, appname, current_date)

        if options.prepare_test:
            eideticker.prepare_test(testname, device_prefs)

        testlog = eideticker.run_test(
            testname,
            options.capture_device,
            appname,
            capture_name,
            device_prefs,
            extra_prefs=options.extra_prefs,
            extra_env_vars=options.extra_env_vars,
            log_checkerboard_stats=options.get_internal_checkerboard_stats,
            profile_file=profile_file,
            capture_area=options.capture_area,
            no_capture=options.no_capture,
            fps=options.fps,
            capture_file=capture_file,
            wifi_settings_file=options.wifi_settings_file,
            sync_time=options.sync_time,
        )

        capture_result = {}
        if not options.no_capture:
            capture_result["file"] = capture_file

            capture = videocapture.Capture(capture_file)
            capture_result["captureFPS"] = capture.fps

            if stableframecapture:
                capture_result["timetostableframe"] = eideticker.get_stable_frame_time(capture)
            else:
                capture_result.update(eideticker.get_standard_metrics(capture, testlog.actions))
            if options.outputdir:
                # video
                video_relpath = os.path.join("videos", "video-%s.webm" % time.time())
                video_path = os.path.join(options.outputdir, video_relpath)
                open(video_path, "w").write(capture.get_video().read())
                capture_result["video"] = video_relpath

                # framediff
                framediff_relpath = os.path.join("framediffs", "framediff-%s.json" % time.time())
                framediff_path = os.path.join(options.outputdir, framediff_relpath)
                with open(framediff_path, "w") as f:
                    framediff = videocapture.get_framediff_sums(capture)
                    f.write(json.dumps({"diffsums": framediff}))
                capture_result["frameDiff"] = framediff_relpath

        if options.enable_profiling:
            capture_result["profile"] = profile_file

        if options.get_internal_checkerboard_stats:
            capture_result["internalcheckerboard"] = testlog.checkerboard_percent_totals

        capture_results.append(capture_result)

    if options.devicetype == "b2g":
        # FIXME: get information from sources.xml and application.ini on
        # device, as we do in update-dashboard.py
        display_key = appkey = "FirefoxOS"
    else:
        appkey = appname
        if appdate:
            appkey = appdate.isoformat()
        else:
            appkey = appname

        if appinfo and appinfo.get("revision"):
            display_key = "%s (%s)" % (appkey, appinfo["revision"])
        else:
            display_key = appkey

    print "=== Results on %s for %s ===" % (testname, display_key)

    if not options.no_capture:
        if stableframecapture:
            print "  Times to first stable frame (seconds):"
            print "  %s" % map(lambda c: c["timetostableframe"], capture_results)
            print
        else:
            print "  Number of unique frames:"
            print "  %s" % map(lambda c: c["uniqueframes"], capture_results)
            print

            print "  Average number of unique frames per second:"
            print "  %s" % map(lambda c: c["fps"], capture_results)
            print

            print "  Checkerboard area/duration (sum of percents NOT percentage):"
            print "  %s" % map(lambda c: c["checkerboard"], capture_results)
            print

            print "  Time to first input response: "
            print "  %s" % map(lambda c: c.get("timetoresponse"), capture_results)
            print

        print "  Capture files:"
        print "  Capture files: %s" % map(lambda c: c["file"], capture_results)
        print

    if options.enable_profiling:
        print "  Profile files:"
        print "  Profile files: %s" % map(lambda c: c["profile"], capture_results)
        print

    if options.get_internal_checkerboard_stats:
        print "  Internal Checkerboard Stats (sum of percents, not percentage):"
        print "  %s" % map(lambda c: c["internalcheckerboard"], capture_results)
        print

    if options.outputdir:
        outputfile = os.path.join(options.outputdir, "metric.json")
        resultdict = {"title": testname, "data": {}}
        if os.path.isfile(outputfile):
            resultdict.update(json.loads(open(outputfile).read()))

        if not resultdict["data"].get(appkey):
            resultdict["data"][appkey] = []
        resultdict["data"][appkey].extend(capture_results)

        with open(outputfile, "w") as f:
            f.write(json.dumps(resultdict))
Example #12
0
def main(args=sys.argv[1:]):
    """Command-line entry point: parse capture options and run one test."""
    usage = "usage: %prog [options] <test key>"
    parser = eideticker.CaptureOptionParser(usage=usage)
    parser.add_option("--url-params", action="store", dest="url_params",
                      help="additional url parameters for test")
    parser.add_option("--name", action="store", type="string",
                      dest="capture_name", help="name to give capture")
    parser.add_option("--capture-file", action="store", type="string",
                      dest="capture_file", help="name to give to capture file")
    parser.add_option("--no-capture", action="store_true", dest="no_capture",
                      help="run through the test, but don't actually "
                      "capture anything")
    parser.add_option("--app-name", action="store", type="string",
                      dest="appname",
                      help="Specify an application name (android only)")
    parser.add_option("--test-type", action="store", type="string",
                      dest="test_type", help="override test type")
    parser.add_option("--checkerboard-log-file", action="store",
                      type="string", dest="checkerboard_log_file",
                      help="name to give checkerboarding stats file (fennec "
                      "only)")
    parser.add_option("--extra-prefs", action="store", dest="extra_prefs",
                      default="{}",
                      help="Extra profile preference for Firefox browsers. "
                      "Must be passed in as a JSON dictionary")
    parser.add_option("--profile-file", action="store", type="string",
                      dest="profile_file",
                      help="Collect a performance profile using the built in "
                      "profiler (fennec only).")
    parser.add_option("--request-log-file", action="store", type="string",
                      dest="request_log_file",
                      help="Collect a log of HTTP requests during tests")
    parser.add_option("--extra-env-vars", action="store",
                      dest="extra_env_vars", default="",
                      help='Extra environment variables to set in '
                      '"VAR1=VAL1 VAR2=VAL2" format')

    options, args = parser.parse_args()
    parser.validate_options(options)

    if len(args) != 1:
        parser.error("You must specify (only) a test key")
        sys.exit(1)
    testkey = args[0]

    try:
        extra_prefs = json.loads(options.extra_prefs)
    except ValueError:
        parser.error("Error processing extra preferences: not valid JSON!")
        raise

    # "VAR1=VAL1 VAR2=VAL2" -> {"VAR1": "VAL1", "VAR2": "VAL2"}
    # (str.partition yields (var, "=", val); [::2] keeps var and val)
    extra_env_vars = dict(pair.partition("=")[::2]
                          for pair in options.extra_env_vars.split())

    capture_area = None
    if options.capture_area:
        # we validated this previously...
        capture_area = json.loads(options.capture_area)
    device_prefs = eideticker.getDevicePrefs(options)

    eideticker.run_test(testkey, options.capture_device,
                        options.appname,
                        options.capture_name, device_prefs,
                        extra_prefs=extra_prefs,
                        extra_env_vars=extra_env_vars,
                        test_type=options.test_type,
                        profile_file=options.profile_file,
                        request_log_file=options.request_log_file,
                        checkerboard_log_file=options.checkerboard_log_file,
                        no_capture=options.no_capture,
                        capture_area=capture_area,
                        capture_file=options.capture_file)
Example #13
0
def runtest(device_prefs, capture_device, outputdir, outputfile, testname, url_params, num_runs,
             startup_test, no_capture, get_internal_checkerboard_stats,
             apk=None, appname = None, appdate = None, enable_profiling=False,
             extra_prefs={}, extra_env_vars={}):
    device = None
    if apk:
        appinfo = eideticker.get_fennec_appinfo(apk)
        appname = appinfo['appname']
        print "Installing %s (version: %s, revision %s)" % (appinfo['appname'],
                                                            appinfo['version'],
                                                            appinfo['revision'])
        device = eideticker.getDevice(**device_prefs)
        device.updateApp(apk)
    else:
        appinfo = None

    captures = []

    for i in range(num_runs):
        # Kill any existing instances of the processes (for Android)
        if device:
            device.killProcess(appname)

        # Now run the test
        curtime = int(time.time())
        capture_file = os.path.join(CAPTURE_DIR,
                                    "metric-test-%s-%s.zip" % (appname,
                                                               curtime))
        if enable_profiling:
            profile_file = os.path.join(PROFILE_DIR,
                                        "profile-%s-%s.zip" % (appname, curtime))
        else:
            profile_file = None

        if get_internal_checkerboard_stats:
            checkerboard_log_file = tempfile.NamedTemporaryFile()
        else:
            checkerboard_log_file = None

        current_date = time.strftime("%Y-%m-%d")
        capture_name = "%s - %s (taken on %s)" % (testname, appname, current_date)

        eideticker.run_test(testname, capture_device,
                            appname, capture_name, device_prefs,
                            extra_prefs=extra_prefs,
                            extra_env_vars=extra_env_vars,
                            checkerboard_log_file=checkerboard_log_file,
                            profile_file=profile_file,
                            no_capture=no_capture,
                            capture_file=capture_file)

        capture_result = {}
        if not no_capture:
            capture_result['file'] = capture_file

            capture = videocapture.Capture(capture_file)

            if startup_test:
                capture_result['stableframe'] = videocapture.get_stable_frame(capture)
            else:
                capture_result['uniqueframes'] = videocapture.get_num_unique_frames(capture)
                capture_result['fps'] = videocapture.get_fps(capture)
                capture_result['checkerboard'] = videocapture.get_checkerboarding_area_duration(capture)
            if outputdir:
                video_path = os.path.join('videos', 'video-%s.webm' % time.time())
                video_file = os.path.join(outputdir, video_path)
                open(video_file, 'w').write(capture.get_video().read())
                capture_result['video'] = video_path

        if enable_profiling:
            capture_result['profile'] = profile_file

        if get_internal_checkerboard_stats:
            internal_checkerboard_totals = parse_checkerboard_log(checkerboard_log_file.name)
            capture_result['internalcheckerboard'] = internal_checkerboard_totals

        captures.append(capture_result)

    appkey = appname
    if appdate:
        appkey = appdate.isoformat()
    else:
        appkey = appname

    if appinfo and appinfo.get('revision'):
        display_key = "%s (%s)" % (appkey, appinfo['revision'])
    else:
        display_key = appkey
    print "=== Results for %s ===" % display_key

    if not no_capture:
        if startup_test:
            print "  First stable frames:"
            print "  %s" % map(lambda c: c['stableframe'], captures)
            print
        else:
            print "  Number of unique frames:"
            print "  %s" % map(lambda c: c['uniqueframes'], captures)
            print

            print "  Average number of unique frames per second:"
            print "  %s" % map(lambda c: c['fps'], captures)
            print

            print "  Checkerboard area/duration (sum of percents NOT percentage):"
            print "  %s" % map(lambda c: c['checkerboard'], captures)
            print

        print "  Capture files:"
        print "  Capture files: %s" % map(lambda c: c['file'], captures)
        print

    if enable_profiling:
        print "  Profile files:"
        print "  Profile files: %s" % map(lambda c: c['profile'], captures)
        print

    if get_internal_checkerboard_stats:
        print "  Internal Checkerboard Stats (sum of percents, not percentage):"
        print "  %s" % map(lambda c: c['internalcheckerboard'], captures)
        print

    if outputfile:
        resultdict = { 'title': testname, 'data': {} }
        if os.path.isfile(outputfile):
            resultdict.update(json.loads(open(outputfile).read()))

        if not resultdict['data'].get(appkey):
            resultdict['data'][appkey] = []
        resultdict['data'][appkey].extend(captures)

        with open(outputfile, 'w') as f:
            f.write(json.dumps(resultdict))