Example #1
def run_comb_perm_search(search):
    #Create a progress bar to show how many searches have been done (the ETA field is omitted)
    search.progressbar = ProgressBar(total_items=get_num_comb_perm(
        string_options=search.string_options, num_chars=search.num_chars))
    search.progressbar(num_completed=0)

    #Get all public buckets that have been found so far
    search.buckets_found = get_buckets_found(search.output_file)
    #Create a string generator
    search.string_generator = createStringGenerator(search.string_options,
                                                    search.num_chars)

    #Check and see if a start after value was provided
    if search.start_after_value:
        search.start_after_found = False
    else:
        search.start_after_found = True

    #Check and see if a stop at value was provided; it has not been seen
    #yet either way, so the flag starts out False
    search.stop_at_found = False

    my_queue = Queue.Queue()
    for i in range(search.threads):
        t = threading.Thread(target=search_instance, args=(search, ))
        my_queue.put(t)

    #Run all of the threads
    while not my_queue.empty():
        my_queue.get().start()
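Neither createStringGenerator nor get_num_comb_perm is shown above. A minimal sketch of what they might look like, assuming the generator simply yields every fixed-length candidate name over the allowed characters; these two hypothetical definitions must agree with each other so the progress bar total matches the generator's output:

import itertools

def createStringGenerator(string_options, num_chars):
    # Hypothetical: yield every num_chars-long string drawn from the
    # allowed characters, e.g. 'aa', 'ab', ... for num_chars=2.
    for combo in itertools.product(string_options, repeat=num_chars):
        yield ''.join(combo)

def get_num_comb_perm(string_options, num_chars):
    # Matching count: alphabet_size ** length candidates in total.
    return len(string_options) ** num_chars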
Example #2
def run_random_search(search):
    #Create a progress bar to show how many searches have been done (the ETA field is omitted)
    search.progressbar = ProgressBar(0)
    search.progressbar.fmt = '''%(percent)3d%% %(bar)s %(current)s/%(total_items)s   %(items_per_sec)s   Run time: %(run_time)s   Bucket: %(bucket_name)s'''

    #Get all public buckets that have been found so far
    search.buckets_found = get_buckets_found(search.output_file)
    #Create a string generator
    search.string_generator = createStringGenerator(search)

    my_queue = Queue.Queue()
    for i in range(search.threads):
        t = threading.Thread(target=search_instance, args=(search, ))
        my_queue.put(t)

    #Run all of the threads
    while not my_queue.empty():
        my_queue.get().start()
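Both launchers put their worker threads on a queue and start them, but nothing ever joins them, so the caller can return while searches are still in flight. A sketch of the same loop with an explicit join, assuming the same search_instance worker:

import threading

def run_threads(search):
    threads = [threading.Thread(target=search_instance, args=(search,))
               for _ in range(search.threads)]
    for t in threads:
        t.start()
    # Block until every worker has finished its share of the search.
    for t in threads:
        t.join()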
Example #3
def download(url, destPath=None, addName=True, clear=False):
    global pbar
    
    origUrl = url
    redirectedUrl = urllib.urlopen(url).geturl()
    if redirectedUrl != url:
        print >> sys.stderr, "Redirected to", redirectedUrl
    if destPath is None:
        destPath = os.path.join(tempfile.gettempdir(), "CAMDA2014")
    destFileName = destPath
    if addName:
        destFileName = os.path.join(destPath, os.path.basename(origUrl))
    if not os.path.exists(os.path.dirname(destFileName)):
        os.makedirs(os.path.dirname(destFileName))
    if clear or not os.path.exists(destFileName):
        if os.path.exists(destFileName): # clear existing file
            os.remove(destFileName)
        print >> sys.stderr, "Downloading file", redirectedUrl, "to", destFileName
        widgets = [FileTransferSpeed(), ' <<<', Bar(), '>>> ', Percentage(), ' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=100)
        pbar.start()
        try:
            try:
                ExceptionOn404().retrieve(redirectedUrl, destFileName, reporthook=downloadProgress)
            except IOError as e:
                print >> sys.stderr, e.errno
                print >> sys.stderr, "Error downloading file", redirectedUrl
                pbar.finish()
                pbar = None
                print >> sys.stderr, "Attempting download with wget"
                downloadWget(origUrl, destFileName)
                if os.path.exists(destFileName):
                    return destFileName
                else:
                    print >> sys.stderr, "Error downloading file", origUrl, "with wget"
                    return None
        except Exception:
            pass # ignore other errors so the bar is still closed below
        pbar.finish()
        pbar = None
    else:
        print >> sys.stderr, "Skipping already downloaded file", url
    return destFileName
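The downloader above is Python 2 only (print >> sys.stderr, urllib.urlopen, the comma form of except). A rough Python 3 sketch of just the download-with-progress step, using the standard library's urllib.request.urlretrieve and its reporthook callback; the wget fallback and redirect reporting are omitted:

import os
import sys
import urllib.request

def download_py3(url, dest_file):
    def report(block_num, block_size, total_size):
        # Called once per retrieved block; print a coarse percentage.
        if total_size > 0:
            done = min(100, block_num * block_size * 100 // total_size)
            sys.stderr.write("\r%3d%%" % done)
    os.makedirs(os.path.dirname(dest_file) or ".", exist_ok=True)
    urllib.request.urlretrieve(url, dest_file, reporthook=report)
    sys.stderr.write("\n")
    return dest_file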
Example #4
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw OptionParser output.
        prefix :list<str>: The command prefix for launching the JS shell.
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime'
                  ' (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel'
                          ' (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set maximum time a test is allowed to run'
                          ' (in seconds).')
    harness_og.add_option('--show-slow', action='store_true',
                          help='Show tests taking longer than a minimum time'
                          ' (in seconds).')
    harness_og.add_option('--slow-test-threshold', type=float, default=5.0,
                          help='Time in seconds a test can take until it is'
                          ' considered slow (default %default).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--feature-args', dest='feature_args', default='',
                          help='Extra args to pass to the JS shell even when feature-testing.')
    harness_og.add_option('--jitflags', dest='jitflags', default='none',
                          type='string',
                          help='IonMonkey option combinations. One of all,'
                          ' debug, ion, and none (default %default).')
    harness_og.add_option('--tbpl', action='store_true',
                          help='Runs each test in all configurations tbpl'
                          ' tests.')
    harness_og.add_option('--tbpl-debug', action='store_true',
                          help='Runs each test in some faster configurations'
                          ' tbpl tests.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a test in debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under JS debugger.')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to'
                          ' caller.')
    harness_og.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
                          help="instead of running tests, use them to test the "
                          "Reflect.stringify code in specified file")
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    harness_og.add_option('--rr', action='store_true',
                          help='Run tests under RR record-and-replay debugger.')
    harness_og.add_option('-C', '--check-output', action='store_true',
                          help='Run tests to check output for different jit-flags')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('--wpt', dest='wpt',
                        type='choice',
                        choices=['enabled', 'disabled', 'if-running-everything'],
                        default='if-running-everything',
                        help="Enable or disable shell web-platform-tests "
                        "(default: enable if no test paths are specified).")
    input_og.add_option('--include', action='append', dest='requested_paths', default=[],
                        help='Include the given test file or directory.')
    input_og.add_option('--exclude', action='append', dest='excluded_paths', default=[],
                        help='Exclude the given test file or directory.')
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5'
                        ' standard.')
    input_og.add_option('--repeat', type=int, default=1,
                        help='Repeat tests the given number of times.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by"
                         " --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print"
                         " output for failed tests.")
    output_og.add_option('--no-show-failed', action='store_true',
                         help="Don't print output for failed tests"
                         " (no-op with --show-output).")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file'
                         ' (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', dest='format', action='store_const',
                         const='automation',
                         help='Use automation-parseable output format.')
    output_og.add_option('--format', dest='format', default='none',
                         type='choice', choices=['automation', 'none'],
                         help='Output format. Either automation or none'
                         ' (default %default).')
    output_og.add_option('--log-wptreport', dest='wptreport', action='store',
                         help='Path to write a Web Platform Tests report (wptreport)')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set(options.requested_paths)
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == 'lldb':
            debugger_prefix = ['lldb', '--']
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            debugger_prefix.append('--dsymutil=yes')
        options.show_output = True
    if options.rr:
        debugger_prefix = ['rr', 'record']

    js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
    prefix = RefTestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
                                             debugger_prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()])

    excluded_paths = set(options.excluded_paths)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.format == 'automation' or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, prefix, requested_paths, excluded_paths)
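The exclude-file loop defines a small line-oriented format: one test path per line, with blank lines skipped and # marking a comment line. A self-contained illustration of that parsing, using a hypothetical excluded.txt:

excluded_paths = set()
with open("excluded.txt", "w") as fp:      # hypothetical example file
    fp.write("# known-bad tests\n\ntests/slow/huge.js\n")

with open("excluded.txt") as fp:
    for line in fp:
        if line.startswith("#"):
            continue
        line = line.strip()
        if line:
            excluded_paths.add(line)

print(excluded_paths)                      # {'tests/slow/huge.js'}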
Example #5
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, requested_paths, excluded_paths)
        options :object: The raw OptionParser output (options.js_shell holds
            the absolute location of the shell to test with).
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup

    op = OptionParser(
        usage=textwrap.dedent(
            """
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """
        ).strip()
    )
    op.add_option(
        "--xul-info", dest="xul_info_src", help="config data for xulRuntime" " (avoids search for config/autoconf.mk)"
    )

    harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
    harness_og.add_option(
        "-j",
        "--worker-count",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel" " (default %default)",
    )
    harness_og.add_option(
        "-t", "--timeout", type=float, default=150.0, help="Set maximum time a test is allows to run" " (in seconds)."
    )
    harness_og.add_option("-a", "--args", dest="shell_args", default="", help="Extra args to pass to the JS shell.")
    harness_og.add_option(
        "--jitflags",
        dest="jitflags",
        default="none",
        type="string",
        help="IonMonkey option combinations. One of all," " debug, ion, and none (default %default).",
    )
    harness_og.add_option("--tbpl", action="store_true", help="Runs each test in all configurations tbpl" " tests.")
    harness_og.add_option(
        "--tbpl-debug", action="store_true", help="Runs each test in some faster configurations" " tbpl tests."
    )
    harness_og.add_option("-g", "--debug", action="store_true", help="Run a test in debugger.")
    harness_og.add_option("--debugger", default="gdb -q --args", help="Debugger command.")
    harness_og.add_option("-J", "--jorendb", action="store_true", help="Run under JS debugger.")
    harness_og.add_option(
        "--passthrough", action="store_true", help="Run tests with stdin/stdout attached to" " caller."
    )
    harness_og.add_option("--valgrind", action="store_true", help="Run tests in valgrind.")
    harness_og.add_option("--valgrind-args", default="", help="Extra args to pass to valgrind.")
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option("-f", "--file", dest="test_file", action="append", help="Get tests from the given file.")
    input_og.add_option("-x", "--exclude-file", action="append", help="Exclude tests from the given file.")
    input_og.add_option(
        "-d", "--exclude-random", dest="random", action="store_false", help='Exclude tests marked as "random."'
    )
    input_og.add_option("--run-skipped", action="store_true", help='Run tests marked as "skip."')
    input_og.add_option("--run-only-skipped", action="store_true", help='Run only tests marked as "skip."')
    input_og.add_option("--run-slow-tests", action="store_true", help='Do not skip tests marked as "slow."')
    input_og.add_option(
        "--no-extensions", action="store_true", help="Run only tests conforming to the ECMAScript 5" " standard."
    )
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output", "Modify the harness and tests output.")
    output_og.add_option("-s", "--show-cmd", action="store_true", help="Show exact commandline used to run each test.")
    output_og.add_option(
        "-o",
        "--show-output",
        action="store_true",
        help="Print each test's output to the file given by" " --output-file.",
    )
    output_og.add_option(
        "-F",
        "--failed-only",
        action="store_true",
        help="If a --show-* option is given, only print" " output for failed tests.",
    )
    output_og.add_option(
        "--no-show-failed",
        action="store_true",
        help="Don't print output for failed tests" " (no-op with --show-output).",
    )
    output_og.add_option("-O", "--output-file", help="Write all output to the given file" " (default: stdout).")
    output_og.add_option("--failure-file", help="Write all not-passed tests to the given file.")
    output_og.add_option(
        "--no-progress", dest="hide_progress", action="store_true", help="Do not show the progress bar."
    )
    output_og.add_option(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format.",
    )
    output_og.add_option(
        "--format",
        dest="format",
        default="none",
        type="choice",
        choices=["automation", "none"],
        help="Output format. Either automation or none" " (default %default).",
    )
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
    special_og.add_option("--make-manifests", metavar="BASE_TEST_PATH", help="Generate reftest manifest files.")
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error("missing JS_SHELL argument")

    # Valgrind and gdb are mutually exclusive.
    if options.valgrind and options.debug:
        op.error("--valgrind and --debug are mutually exclusive.")

    # Fill the debugger field, as needed.
    prefix = options.debugger.split() if options.debug else []
    if options.valgrind:
        prefix = ["valgrind"] + options.valgrind_args.split()
        if os.uname()[0] == "Darwin":
            prefix.append("--dsymutil=yes")
        options.show_output = True

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(
            os.path.join(abspath(dirname(abspath(__file__))), "..", "..", "examples", "jorendb.js")
        )
        js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
    TestCase.set_js_cmd_prefix(options.js_shell, js_cmd_args, prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set([line.strip() for line in open(test_file).readlines()])

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, "r") as fp:
                for line in fp:
                    if line.startswith("#"):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, "w")
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (
        options.format == "automation" or not ProgressBar.conservative_isatty() or options.hide_progress
    )

    return (options, requested_paths, excluded_paths)
Example #6
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, requested_paths, excluded_paths)
        options :object: The raw OptionParser output (options.js_shell holds
            the absolute location of the shell to test with).
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime'
                  ' (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel'
                          ' (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set maximum time a test is allowed to run'
                          ' (in seconds).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags', dest='jitflags', default='none',
                          type='string',
                          help='IonMonkey option combinations. One of all,'
                          ' debug, ion, and none (default %default).')
    harness_og.add_option('--tbpl', action='store_true',
                          help='Runs each test in all configurations tbpl'
                          ' tests.')
    harness_og.add_option('--tbpl-debug', action='store_true',
                          help='Runs each test in some faster configurations'
                          ' tbpl tests.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a test in debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under JS debugger.')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to'
                          ' caller.')
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    harness_og.add_option('--rr', action='store_true',
                          help='Run tests under RR record-and-replay debugger.')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5'
                        ' standard.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by"
                         " --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print"
                         " output for failed tests.")
    output_og.add_option('--no-show-failed', action='store_true',
                         help="Don't print output for failed tests"
                         " (no-op with --show-output).")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file'
                         ' (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', dest='format', action='store_const',
                         const='automation',
                         help='Use automation-parseable output format.')
    output_og.add_option('--format', dest='format', default='none',
                         type='choice', choices=['automation', 'none'],
                         help='Output format. Either automation or none'
                         ' (default %default).')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    prefix = options.debugger.split() if options.debug else []
    if options.valgrind:
        prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            prefix.append('--dsymutil=yes')
        options.show_output = True
    if options.rr:
        prefix = ['rr', 'record']

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
    TestCase.set_js_cmd_prefix(options.js_shell, js_cmd_args, prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()])

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.format == 'automation' or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, requested_paths, excluded_paths)
Example #7
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw ArgumentParser output.
        prefix :list<str>: The command prefix for launching the JS shell.
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from argparse import ArgumentParser

    op = ArgumentParser(
        description="Run jstests JS shell tests",
        epilog="Shell output format: [ pass | fail | timeout | skip ] progress | time",
    )
    op.add_argument(
        "--xul-info",
        dest="xul_info_src",
        help="config data for xulRuntime" " (avoids search for config/autoconf.mk)",
    )

    harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
    harness_og.add_argument(
        "-j",
        "--worker-count",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel" " (default %(default)s)",
    )
    harness_og.add_argument(
        "-t",
        "--timeout",
        type=float,
        default=150.0,
        help="Set maximum time a test is allows to run" " (in seconds).",
    )
    harness_og.add_argument(
        "--show-slow",
        action="store_true",
        help="Show tests taking longer than a minimum time" " (in seconds).",
    )
    harness_og.add_argument(
        "--slow-test-threshold",
        type=float,
        default=5.0,
        help="Time in seconds a test can take until it is"
        "considered slow (default %(default)s).",
    )
    harness_og.add_argument(
        "-a",
        "--args",
        dest="shell_args",
        default="",
        help="Extra args to pass to the JS shell.",
    )
    harness_og.add_argument(
        "--feature-args",
        dest="feature_args",
        default="",
        help="Extra args to pass to the JS shell even when feature-testing.",
    )
    harness_og.add_argument(
        "--jitflags",
        dest="jitflags",
        default="none",
        type=str,
        help="IonMonkey option combinations. One of all,"
        " debug, ion, and none (default %(default)s).",
    )
    harness_og.add_argument(
        "--tbpl",
        action="store_true",
        help="Runs each test in all configurations tbpl" " tests.",
    )
    harness_og.add_argument(
        "--tbpl-debug",
        action="store_true",
        help="Runs each test in some faster configurations" " tbpl tests.",
    )
    harness_og.add_argument(
        "-g", "--debug", action="store_true", help="Run a test in debugger."
    )
    harness_og.add_argument(
        "--debugger", default="gdb -q --args", help="Debugger command."
    )
    harness_og.add_argument(
        "-J", "--jorendb", action="store_true", help="Run under JS debugger."
    )
    harness_og.add_argument(
        "--passthrough",
        action="store_true",
        help="Run tests with stdin/stdout attached to" " caller.",
    )
    harness_og.add_argument(
        "--test-reflect-stringify",
        dest="test_reflect_stringify",
        help="instead of running tests, use them to test the "
        "Reflect.stringify code in specified file",
    )
    harness_og.add_argument(
        "--valgrind", action="store_true", help="Run tests in valgrind."
    )
    harness_og.add_argument(
        "--valgrind-args", default="", help="Extra args to pass to valgrind."
    )
    harness_og.add_argument(
        "--rr",
        action="store_true",
        help="Run tests under RR record-and-replay debugger.",
    )
    harness_og.add_argument(
        "-C",
        "--check-output",
        action="store_true",
        help="Run tests to check output for different jit-flags",
    )
    harness_og.add_argument(
        "--remote", action="store_true", help="Run tests on a remote device"
    )
    harness_og.add_argument(
        "--deviceIP",
        action="store",
        type=str,
        dest="device_ip",
        help="IP address of remote device to test",
    )
    harness_og.add_argument(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    harness_og.add_argument(
        "--deviceSerial",
        action="store",
        type=str,
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    harness_og.add_argument(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type=str,
        default="/data/local/tmp/test_root",
        help="The remote directory to use as test root" " (e.g. %(default)s)",
    )
    harness_og.add_argument(
        "--localLib",
        dest="local_lib",
        action="store",
        type=str,
        help="The location of libraries to push -- preferably" " stripped",
    )
    harness_og.add_argument(
        "--no-xdr",
        dest="use_xdr",
        action="store_false",
        help="Whether to disable caching of self-hosted parsed content in XDR format.",
    )

    input_og = op.add_argument_group("Inputs", "Change what tests are run.")
    input_og.add_argument(
        "-f",
        "--file",
        dest="test_file",
        action="append",
        help="Get tests from the given file.",
    )
    input_og.add_argument(
        "-x",
        "--exclude-file",
        action="append",
        help="Exclude tests from the given file.",
    )
    input_og.add_argument(
        "--wpt",
        dest="wpt",
        choices=["enabled", "disabled", "if-running-everything"],
        default="if-running-everything",
        help="Enable or disable shell web-platform-tests "
        "(default: enable if no test paths are specified).",
    )
    input_og.add_argument(
        "--include",
        action="append",
        dest="requested_paths",
        default=[],
        help="Include the given test file or directory.",
    )
    input_og.add_argument(
        "--exclude",
        action="append",
        dest="excluded_paths",
        default=[],
        help="Exclude the given test file or directory.",
    )
    input_og.add_argument(
        "-d",
        "--exclude-random",
        dest="random",
        action="store_false",
        help='Exclude tests marked as "random."',
    )
    input_og.add_argument(
        "--run-skipped", action="store_true", help='Run tests marked as "skip."'
    )
    input_og.add_argument(
        "--run-only-skipped",
        action="store_true",
        help='Run only tests marked as "skip."',
    )
    input_og.add_argument(
        "--run-slow-tests",
        action="store_true",
        help='Do not skip tests marked as "slow."',
    )
    input_og.add_argument(
        "--no-extensions",
        action="store_true",
        help="Run only tests conforming to the ECMAScript 5" " standard.",
    )
    input_og.add_argument(
        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
    )

    output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
    output_og.add_argument(
        "-s",
        "--show-cmd",
        action="store_true",
        help="Show exact commandline used to run each test.",
    )
    output_og.add_argument(
        "-o",
        "--show-output",
        action="store_true",
        help="Print each test's output to the file given by" " --output-file.",
    )
    output_og.add_argument(
        "-F",
        "--failed-only",
        action="store_true",
        help="If a --show-* option is given, only print" " output for failed tests.",
    )
    output_og.add_argument(
        "--no-show-failed",
        action="store_true",
        help="Don't print output for failed tests" " (no-op with --show-output).",
    )
    output_og.add_argument(
        "-O",
        "--output-file",
        help="Write all output to the given file" " (default: stdout).",
    )
    output_og.add_argument(
        "--failure-file", help="Write all not-passed tests to the given file."
    )
    output_og.add_argument(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="Do not show the progress bar.",
    )
    output_og.add_argument(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format.",
    )
    output_og.add_argument(
        "--format",
        dest="format",
        default="none",
        choices=["automation", "none"],
        help="Output format. Either automation or none" " (default %(default)s).",
    )
    output_og.add_argument(
        "--log-wptreport",
        dest="wptreport",
        action="store",
        help="Path to write a Web Platform Tests report (wptreport)",
    )
    output_og.add_argument(
        "--this-chunk", type=int, default=1, help="The test chunk to run."
    )
    output_og.add_argument(
        "--total-chunks", type=int, default=1, help="The total number of test chunks."
    )

    special_og = op.add_argument_group(
        "Special", "Special modes that do not run tests."
    )
    special_og.add_argument(
        "--make-manifests",
        metavar="BASE_TEST_PATH",
        help="Generate reftest manifest files.",
    )

    op.add_argument("--js-shell", metavar="JS_SHELL", help="JS shell to run tests with")
    op.add_argument(
        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
    )

    options, args = op.parse_known_args()

    # Need a shell unless in a special mode.
    if not options.make_manifests:
        if not args:
            op.error("missing JS_SHELL argument")
        options.js_shell = os.path.abspath(args.pop(0))

    requested_paths = set(args)

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == "lldb":
            debugger_prefix = ["lldb", "--"]
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ["valgrind"] + options.valgrind_args.split()
        if os.uname()[0] == "Darwin":
            debugger_prefix.append("--dsymutil=yes")
        options.show_output = True
    if options.rr:
        debugger_prefix = ["rr", "record"]

    js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(
            os.path.join(
                abspath(dirname(abspath(__file__))),
                "..",
                "..",
                "examples",
                "jorendb.js",
            )
        )
        js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
    prefix = RefTestCase.build_js_cmd_prefix(
        options.js_shell, js_cmd_args, debugger_prefix
    )

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()]
            )

    excluded_paths = set(options.excluded_paths)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, "r") as fp:
                for line in fp:
                    if line.startswith("#"):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, "w")
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (
        options.format == "automation"
        or not ProgressBar.conservative_isatty()
        or options.hide_progress
    )

    return (options, prefix, requested_paths, excluded_paths)
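Unlike the optparse variants, this version uses ArgumentParser.parse_known_args(), which returns the parsed options plus a list of leftover arguments; the shell path and test paths are then pulled out of that list. A minimal demonstration of the split:

from argparse import ArgumentParser

op = ArgumentParser()
op.add_argument("--debug", action="store_true")
options, args = op.parse_known_args(["--debug", "/bin/js", "tests/basic"])
print(options.debug)   # True
print(args)            # ['/bin/js', 'tests/basic']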
Example #8
File: qark.py Project: zhouat/qark
    try:
        clear_amount = (len(PROGRESS_BARS) - 1) * 2
        for plugin in manager.getAllPlugins():
            clear_amount += 2
        clear_lines(clear_amount)

        # TODO: change to list comprehension to make it more pythonic
        # all static writers included in every static analysis
        writers = [common.Writer((0, height-8)), common.Writer((0, height-6)),
                   common.Writer((0, height-4)), common.Writer((0, height-2)),
                   common.Writer((0, height-10)), common.Writer((0, height-12)),
                   common.Writer((0, height-14))]
        pbars = {}

        # create dictionary for progress bars, pbars = { name: ProgressBar }
        for barNum in range(len(PROGRESS_BARS)-1):
            pbars[PROGRESS_BARS[barNum]] = ProgressBar(
                widgets=[PROGRESS_BARS[barNum], Percentage(), Bar()],
                maxval=100, fd=writers[barNum]).start()

        # create writer and progress bar for each plugin
        placer = 0
        for plugin in manager.getAllPlugins():
            writer = common.Writer((0, height-(16+placer)))
            writers.append(writer)
            if 'Plugin issues' not in pbars:
                pbars['Plugin issues'] = {}

            pbars['Plugin issues'][plugin.plugin_object.getName()] = ProgressBar(
                widgets=[plugin.plugin_object.getName(), Percentage(), Bar()],
                maxval=100, fd=writer).start()
            placer += 2

        pub.subscribe(progress_bar_update, 'progress')

        #Create static analysis threads
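The pub.subscribe call wires the progress bars to PyPubSub: each plugin publishes on the 'progress' topic and the subscribed handler moves the matching bar. The handler's real signature is not shown in this fragment; a sketch assuming it takes the bar name and a percentage:

from pubsub import pub

def progress_bar_update(bar='', percent=0):
    # Hypothetical handler: look up the named bar built above and move it.
    pbars[bar].update(percent)

pub.subscribe(progress_bar_update, 'progress')

# A scanner thread would then report its progress like this:
pub.sendMessage('progress', bar='X.509 Validation', percent=50)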
Example #9
def metric_downloader(workbench, outputfolder):

    log = Logger("Measurement Downloader")

    conn = sqlite3.connect(workbench)
    curs = conn.cursor()
    visits = {}
    for row in curs.execute(
            'SELECT WatershedName, SiteName, VisitYear, V.VisitID'
            ' FROM CHaMP_Visits V'
            ' INNER JOIN CHaMP_Sites S ON V.SiteID = S.SiteID'
            ' INNER JOIN CHaMP_Watersheds W ON S.WatershedID = W.WatershedID'
            ' WHERE V.ProgramID IN (1, 5, 6)'
            ' AND W.WatershedID IN (15, 32)'  # NOT IN ("Asotin", "Big-Navarro-Garcia (CA)", "CHaMP Training")'
            ' ORDER BY WatershedName, visitYear'):

        if not row[0] in visits:
            visits[row[0]] = []
        visits[row[0]].append({
            'VisitID': row[3],
            'Year': row[2],
            'Site': row[1]
        })

    watersheds = list(visits.keys())
    watersheds.sort()
    curs.close()

    for watershed in watersheds:
        visitCount = len(visits[watershed])

        p = ProgressBar(end=len(visits[watershed]),
                        width=20,
                        fill='=',
                        blank='.',
                        format='[%(fill)s>%(blank)s] %(progress)s%%')

        for visit in visits[watershed]:
            p + 1  # the custom ProgressBar overloads "+" to advance one step
            print p

            visit_path = os.path.join(outputfolder, str(visit['Year']),
                                      watershed.replace(' ', ''),
                                      visit['Site'].replace(' ', ''),
                                      'VISIT_{}'.format(visit['VisitID']))

            measurements = APIGet("visits/{0}/measurements".format(
                visit['VisitID']))
            for meas in measurements:

                if not os.path.isdir(visit_path):
                    os.makedirs(visit_path)
                meas_path = os.path.join(
                    visit_path,
                    '{}.json'.format(meas['name'].replace(' ', '')))

                data = APIGet(meas['url'], True)

                with open(meas_path, 'w') as outfile:
                    json.dump(data, outfile)

    print('Process completed')
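The ProgressBar used here is a small custom class rather than the progressbar package: p + 1 advances it and print p renders it through the format template. A minimal sketch of a class with that interface, matching the constructor arguments used above:

class ProgressBar(object):
    def __init__(self, end=100, width=20, fill='=', blank='.',
                 format='[%(fill)s>%(blank)s] %(progress)s%%'):
        self.end, self.width = end, width
        self.fill, self.blank, self.format = fill, blank, format
        self.progress = 0

    def __add__(self, increment):
        # 'p + 1' mutates the bar in place and returns it.
        self.progress = min(self.end, self.progress + increment)
        return self

    def __str__(self):
        done = self.width * self.progress // max(self.end, 1)
        return self.format % {'fill': self.fill * done,
                              'blank': self.blank * (self.width - done),
                              'progress': 100 * self.progress // max(self.end, 1)}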
Example #10
    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    options.show = options.show_cmd or options.show_output

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.tinderbox
                             or not ProgressBar.conservative_isatty()
                             or options.hide_progress)

    return (options, requested_paths, excluded_paths)


def parse_jitflags(op_jitflags):
    jitflags = [['-' + flag for flag in flags]
                for flags in op_jitflags.split(',')]
    for flags in jitflags:
        for flag in flags:
            if flag not in ('-m', '-a', '-p', '-d', '-n'):
                print('Invalid jit flag: "%s"' % flag)
                sys.exit(1)
    return jitflags
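Each comma-separated group becomes one shell configuration, and every character in a group becomes one single-letter flag. For example, given the whitelist above:

parse_jitflags('mn,a')   # -> [['-m', '-n'], ['-a']]
parse_jitflags('mq,a')   # prints: Invalid jit flag: "-q"  then exits(1)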
Example #11
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, requested_paths, excluded_paths)
        options :object: The raw OptionParser output (options.js_shell holds
            the absolute location of the shell to test with).
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option(
        '--xul-info',
        dest='xul_info_src',
        help='config data for xulRuntime (avoids search for config/autoconf.mk)'
    )

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option(
        '-j',
        '--worker-count',
        type=int,
        default=max(1, get_cpu_count()),
        help='Number of tests to run in parallel (default %default)')
    harness_og.add_option(
        '-t',
        '--timeout',
        type=float,
        default=150.0,
        help='Set maximum time a test is allows to run (in seconds).')
    harness_og.add_option('-a',
                          '--args',
                          dest='shell_args',
                          default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags',
                          default='',
                          help="Obsolete. Does nothing.")
    harness_og.add_option(
        '--tbpl',
        action='store_true',
        help='Runs each test in all configurations tbpl tests.')
    harness_og.add_option('-g',
                          '--debug',
                          action='store_true',
                          help='Run a test in debugger.')
    harness_og.add_option('--debugger',
                          default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J',
                          '--jorendb',
                          action='store_true',
                          help='Run under JS debugger.')
    harness_og.add_option(
        '--passthrough',
        action='store_true',
        help='Run tests with stdin/stdout attached to caller.')
    harness_og.add_option('--valgrind',
                          action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args',
                          default='',
                          help='Extra args to pass to valgrind.')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f',
                        '--file',
                        dest='test_file',
                        action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x',
                        '--exclude-file',
                        action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('-d',
                        '--exclude-random',
                        dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped',
                        action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped',
                        action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests',
                        action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option(
        '--no-extensions',
        action='store_true',
        help='Run only tests conforming to the ECMAScript 5 standard.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s',
                         '--show-cmd',
                         action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option(
        '-o',
        '--show-output',
        action='store_true',
        help="Print each test's output to the file given by --output-file.")
    output_og.add_option(
        '-F',
        '--failed-only',
        action='store_true',
        help=
        "If a --show-* option is given, only print output for failed tests.")
    output_og.add_option(
        '--no-show-failed',
        action='store_true',
        help="Don't print output for failed tests (no-op with --show-output).")
    output_og.add_option(
        '-O',
        '--output-file',
        help='Write all output to the given file (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress',
                         dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox',
                         action='store_true',
                         help='Use tinderbox-parseable output format.')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests',
                          metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind and gdb are mutually exclusive.
    if options.valgrind and options.debug:
        op.error("--valgrind and --debug are mutually exclusive.")

    # Fill the debugger field, as needed.
    prefix = options.debugger.split() if options.debug else []
    if options.valgrind:
        prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            prefix.append('--dsymutil=yes')
        options.show_output = True

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(
            os.path.join(abspath(dirname(abspath(__file__))), '..', '..',
                         'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
    TestCase.set_js_cmd_prefix(options.js_shell, js_cmd_args, prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()])

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.tinderbox
                             or not ProgressBar.conservative_isatty()
                             or options.hide_progress)

    return (options, requested_paths, excluded_paths)
Example #12
    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    options.show = options.show_cmd or options.show_output

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.tinderbox or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, requested_paths, excluded_paths)

def parse_jitflags(op_jitflags):
    jitflags = [['-' + flag for flag in flags]
                for flags in op_jitflags.split(',')]
    for flags in jitflags:
        for flag in flags:
            if flag not in ('-m', '-a', '-p', '-d', '-n'):
                print('Invalid jit flag: "%s"' % flag)
                sys.exit(1)
    return jitflags

def load_tests(options, requested_paths, excluded_paths):
Example #13
File: qark.py Project: ualwayswithme/qark
common.keyFiles = common.findKeys(common.sourceDirectory)

'''
#Look for improper use of checkCallingPermission, rather than enforceCallingPermission
try:
	useCheckPermission()
except Exception as e:
	common.logger.error("Unable to run checks for improper use of checkCallingPermission: " + str(e))
'''

clearlines(14)
height = common.term.height

try:
	writer1 = common.Writer((0, height-8))
	pbar1 = ProgressBar(widgets=['X.509 Validation ', Percentage(), Bar()], maxval=100, fd=writer1).start()
	writer2 = common.Writer((0, height-6))
	pbar2 = ProgressBar(widgets=['Pending Intents ', Percentage(), Bar()], maxval=100, fd=writer2).start()
	writer3 = common.Writer((0, height-4))
	pbar3 = ProgressBar(widgets=['File Permissions (check 1) ', Percentage(), Bar()], maxval=100, fd=writer3).start()
	writer4 = common.Writer((0, height-2))
	pbar4 = ProgressBar(widgets=['File Permissions (check 2) ', Percentage(), Bar()], maxval=100, fd=writer4).start()
	writer5 = common.Writer((0, height-10))
	pbar5 = ProgressBar(widgets=['Webview checks ', Percentage(), Bar()], maxval=100, fd=writer5).start()
	writer6 = common.Writer((0, height-12))
	pbar6 = ProgressBar(widgets=['Broadcast issues ', Percentage(), Bar()], maxval=100, fd=writer6).start()
	writer7 = common.Writer((0, height-14))
	pbar7 = ProgressBar(widgets=['Crypto issues ', Percentage(), Bar()], maxval=100, fd=writer7).start()

	pub.subscribe(progress_bar_update, 'progress')