Example #1
0
 def do_reset(self, args):
     """Reset configuration to its defaults.

     When *args* names a single option, only that option is restored;
     otherwise the entire configuration is replaced.  The result is
     persisted and printed.
     """
     defaults = get_default_config()
     if not args:
         self.config = defaults
     else:
         self.config[args] = defaults[args]
     save_config(self.config)
     self._print_config()
Example #2
0
 def do_reset(self, args):
     """Restore default configuration values.

     Resets only the option named by *args* when one is given, or every
     option when *args* is falsy, then saves and prints the config.
     """
     if not args:
         self.config = get_default_config()
     else:
         self.config[args] = get_default_config()[args]
     save_config(self.config)
     self._print_config()
def main():
    """Parse command-line options and run the SSD demo with the merged config."""
    cfg = get_default_config()

    parser = argparse.ArgumentParser(description="SSD Demo.")
    parser.add_argument("--ckpt", type=str, default=None, help="Trained weights.")
    parser.add_argument("--score_threshold", type=float, default=0.25)
    parser.add_argument("--images_dir", default='demo', type=str,
                        help='Specify a image dir to do prediction.')
    parser.add_argument("--output_dir", default='demo/result', type=str,
                        help='Specify a image dir to save predicted images.')
    parser.add_argument("--dataset_type", default="voc", type=str,
                        help='Specify dataset type. Currently support voc and coco.')
    # Anything after the named options is treated as config overrides.
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    print(args)

    # Apply command-line overrides, then lock the config against mutation.
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    print("Running with config:\n{}".format(cfg))

    demo_kwargs = dict(
        cfg=cfg,
        ckpt=args.ckpt,
        score_threshold=args.score_threshold,
        images_dir=args.images_dir,
        output_dir=args.output_dir,
        dataset_type=args.dataset_type,
    )
    run_demo(**demo_kwargs)
Example #4
0
def app(ctx, alt_config, config_values, data_dir, log_config):
    """Prepare logging, the data directory and the configuration, then
    stash the config dict on the click context object."""
    # configure logging
    slogging.configure(log_config or ':info')

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # Config files only contain required config (privkeys) and settings that
    # differ from the defaults; read from the given file or from data_dir.
    source = alt_config if alt_config else data_dir
    config = konfig.load_config(source)
    config['data_dir'] = data_dir

    # Layer the defaults of all services underneath the loaded values.
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # Apply "a.b.c=d" overrides supplied on the command line.
    for override in config_values:
        try:
            konfig.set_config_param(config, override)
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')

    ctx.obj = {'config': config}
def main(args, seed=None):
    """Generate rejected input strings for a subject script and pickle them.

    args[1] -- path to the subject script.
    args[2] -- optional generation time in seconds (falls back to config).
    args[3] -- optional output file (falls back to config).
    args[4] -- optional file containing pre-recorded valid inputs.
    seed    -- optional RNG seed for reproducibility.
    """
    random.seed(seed)
    # Read the config
    global current_config
    if not current_config:
        current_config = get_default_config()
    # Add the subject's path to sys.path
    sys.path.insert(0, args[1][:args[1].rfind("/") + 1])
    # Load pre-recorded valid inputs if a file was supplied.
    if len(args) > 4 and args[4]:
        with open(args[4], "r", encoding="UTF-8") as vf:
            # SECURITY NOTE: eval() executes arbitrary code from the file;
            # only use trusted input files here.
            valid_ins = eval(vf.read())
    else:
        valid_ins = None
    res = gen(args[1],
              int(args[2])
              if len(args) > 2 else int(current_config["default_gen_time"]),
              valid_strs=valid_ins)
    outfile = args[3] if len(args) > 3 else current_config["default_rejected"]
    resl = list(res)
    print("All generated strings:", resl, flush=True)
    print(str(len(resl)), " rejected elements created", flush=True)
    # Save the mutated strings in binary as a file.  Use a context manager so
    # the handle is closed even when pickling fails (the original leaked it).
    with open(outfile, mode='wb') as res_file:
        pickle.dump(resl, res_file)
Example #6
0
def app(ctx, profile, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):
    """Assemble the node configuration and store it on the click context.

    Config layers, in increasing priority: service defaults, the selected
    profile, then individual "a.b.c=d" command-line overrides.  Also sets
    up logging, the data directory, genesis settings, the bootstrap node
    and mining options.  Raises BadParameter for malformed overrides.
    """

    # configure logging
    slogging.configure(log_config, log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    log.DEV("Move to EthApp.default_config")
    konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})

    # Set config values based on profile selection
    merge_dict(config, PROFILES[profile])

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
            # Overriding any eth.genesis* value drops the cached genesis_hash
            # so it gets recomputed.  NOTE(review): a KeyError from the del
            # (hash absent) would not be caught by the ValueError handler
            # below -- confirm genesis_hash is always present here.
            if config_value.startswith("eth.genesis"):
                del config['eth']['genesis_hash']
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')

    # Load genesis config
    update_config_from_genesis_json(config, config['eth']['genesis'])

    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {'config': config,
               'unlock': unlock,
               'password': password.read().rstrip() if password else None}
    # NOTE(review): assert is stripped under -O; an explicit raise would be safer.
    assert (password and ctx.obj['password'] is not None and len(
        ctx.obj['password'])) or not password, "empty password file"
Example #7
0
def main():
    """Spawn a service thread per configured task name and wait for them."""
    cfg = get_default_config()
    raw_names = cfg.get_option_value('task_list', 'task_names')
    # Comma-separated list; whitespace is ignored.
    names = raw_names.replace(' ', '').split(',')
    task_info = [{'task_name': name} for name in names]

    service_threads = Service(5, *task_info)
    service_threads.start()
    service_threads.wait()
Example #8
0
def load_config():
    """Build the run configuration from defaults, an optional file and CLI overrides."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--config', type=str)
    cli.add_argument('options', default=None, nargs=argparse.REMAINDER)
    parsed = cli.parse_args()

    cfg = get_default_config()
    # File settings override defaults; explicit CLI options override both.
    if parsed.config is not None:
        cfg.merge_from_file(parsed.config)
    cfg.merge_from_list(parsed.options)
    # Fall back to the CPU when no GPU is available.
    if not torch.cuda.is_available():
        cfg.train.device = 'cpu'
    cfg.freeze()
    return cfg
Example #9
0
def app(ctx, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):
    """Assemble the node configuration and store it on the click context.

    Loads the config file (explicit path or the one in data_dir), layers
    service defaults underneath, applies "a.b.c=d" command-line overrides,
    then bootstrap-node and mining options.  Stores the result together with
    unlock/password info in ``ctx.obj``.  Raises BadParameter for malformed
    overrides.
    """

    # configure logging
    log_config = log_config or ':info'
    slogging.configure(log_config, log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')
    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {'config': config,
               'unlock': unlock,
               'password': password.read().rstrip() if password else None}
    # NOTE(review): assert is stripped under -O; an explicit raise would be safer.
    assert (password and ctx.obj['password'] is not None and len(ctx.obj['password'])) or not password, "empty password file"
Example #10
0
def app(ctx, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):
    """Assemble the node configuration from file, defaults and CLI overrides
    and store it (plus unlock/password info) on the click context."""
    # configure logging
    slogging.configure(log_config or ':info', log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # Config files only contain required config (privkeys) and settings that
    # differ from the defaults; read from the given file or from data_dir.
    config = konfig.load_config(alt_config if alt_config else data_dir)
    config['data_dir'] = data_dir

    # Layer the defaults of all services underneath the loaded values.
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # Apply "a.b.c=d" overrides supplied on the command line.
    for override in config_values:
        try:
            konfig.set_config_param(config, override)
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')

    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]

    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {
        'config': config,
        'unlock': unlock,
        'password': password.read().rstrip() if password else None,
    }
def remove_duplicates(fdir, ext, pairlst):
    """Delete duplicate generated files in *fdir* (matching extension *ext*)
    and drop the corresponding entries from *pairlst*.

    Duplicate detection is distributed over ``compare_index`` worker threads.
    *pairlst* is filtered in place and also returned.
    """
    print("Removing duplicates...", flush=True)
    global current_config
    current_config = get_default_config()
    # Normalise ext to have no leading dot and fdir no trailing slash.
    ext = ext if not ext.startswith(".") else ext[1:]
    fdir = fdir if not fdir.endswith("/") else fdir[:-1]
    files = [fl.replace("\\", "/") for fl in glob.glob(fdir + "/*." + ext)]

    if len(files) < 2:
        return pairlst

    num_threads = int(current_config["test_threads"])
    future_to_index = {}
    # Use a set: the original kept a list and did O(n) "not in" scans per
    # result, making collection O(n^2); a set deduplicates in O(1).
    dups = set()

    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as tpe:
        for (idx1, idxe) in compute_distribution(num_threads, len(files)):
            future_to_index[tpe.submit(compare_index, idx1, idxe,
                                       files)] = idx1

        for future in concurrent.futures.as_completed(future_to_index):
            dups.update(future.result())

    for dpf in dups:
        if os.path.exists(dpf):
            os.remove(dpf)

    # Filter in place so callers holding a reference to pairlst see the result.
    pairlst[:] = [pair for pair in pairlst if pair[0] not in dups]

    print("Removed duplicates:", len(dups), flush=True)

    return pairlst
Example #12
0
def main():
    """Deploy every file named on the command line using the merged config."""
    config = c.get_default_config()
    # argparse in Python 3 has no ``version=`` constructor argument (that was
    # optparse-style); register the standard 'version' action instead.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info('PyEPM %s', __version__)
    logger.info('=====')

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            # Logger.warn is a deprecated alias of warning (removed in 3.13).
            logger.warning("File does not exist: %s" % filename)
        else:
            logger.info("Deploying %s..." % filename)
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #13
0
def main():
    """Deploy every file named on the command line, with colored log output."""
    config = c.get_default_config()
    # argparse in Python 3 has no ``version=`` constructor argument (that was
    # optparse-style); register the standard 'version' action instead.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '', verbosity=config.getint('misc', 'verbosity'))
    logger.info(colors.HEADER + '=====' + colors.ENDC)
    logger.info(colors.OKGREEN + 'PyEPM ' + colors.ENDC + '%s', __version__)
    logger.info(colors.HEADER + '=====' + colors.ENDC)

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            # Logger.warn is a deprecated alias of warning (removed in 3.13).
            logger.warning("\nFile does not exist: %s" % filename)
        else:
            logger.info("\nDeploying " + colors.BOLD + "%s" % filename + colors.ENDC + "...")
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #14
0
def main():
    """Deploy every file named on the command line using the merged config."""
    config = c.get_default_config()
    # argparse in Python 3 has no ``version=`` constructor argument (that was
    # optparse-style); register the standard 'version' action instead.
    parser = ArgumentParser()
    parser.add_argument('--version', action='version', version=__version__)

    config = create_config(parser)

    # Logging
    configure_logging(config.get('misc', 'logging') or '',
                      verbosity=config.getint('misc', 'verbosity'))
    logger.info('PyEPM %s', __version__)
    logger.info('=====')

    logger.debug(c.dump_config(config))

    args = parser.parse_args()

    for filename in args.filename:
        if not os.path.exists(filename):
            # Logger.warn is a deprecated alias of warning (removed in 3.13).
            logger.warning("File does not exist: %s" % filename)
        else:
            logger.info("Deploying %s..." % filename)
            deployment = deploy.Deploy(filename, config)
            deployment.deploy()
Example #15
0
def app(ctx, alt_config, config_values, data_dir, log_config):
    """Assemble the application configuration and store it on the context."""
    # configure logging
    slogging.configure(log_config or ':info')

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(
        data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # Config files only contain required config (privkeys) and settings that
    # differ from the defaults; read from the given file or from data_dir.
    config = konfig.load_config(alt_config if alt_config else data_dir)
    config['data_dir'] = data_dir

    # Layer the defaults of all services underneath the loaded values.
    konfig.update_config_with_defaults(
        config, konfig.get_default_config([EthApp] + services))

    # Apply "a.b.c=d" overrides supplied on the command line.
    for item in config_values:
        try:
            konfig.set_config_param(config, item)
        except ValueError:
            raise BadParameter(
                'Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                'specifies the parameter to set and d is a valid yaml value '
                '(example: "-c jsonrpc.port=5000")')

    ctx.obj = {'config': config}
Example #16
0
# ---- Module-level tracing state ----

# Maps a condition line to the line of its then branch
cond_dict = {}
# Records all lines the program executed
lines = []
# Maps a variable to all its recorded assignments
vrs = {}
# Stores which file to track
ar = ""
# Timeout value
timeo = None
# The time the execution started. Used to abort in case a timeout occurs.
time_start = None
# target_type = type("")
# By using the commented line instead we get a seizable improvement in
# execution time but may consume more memory.
# SECURITY NOTE: eval() executes the configured "trace_type" string as code;
# the configuration source must be trusted.
target_type = eval("type(" + get_default_config()["trace_type"] + ")")
# The AST of the original file. Stores where exceptions are manually raised and line numbers of conditional branches
base_ast = None
# Indicates which condition needs to be resolved at which call depth (call and return).
# This way we know whether a condition is True or False without directly evaluating.
cond_flag = {}
# The current call depth
depth = -1
# Logging variable assignments
should_log_vass = False


# The AST that stores which lines are in exception classes as well as the condition to then branch mapping
class CondAST:
    def __init__(self, sourcefile, deformattedfile):
        # Get the program's AST
Example #17
0
def main(argv, seed=None):
    """Mutate a subject script so previously rejected inputs become accepted
    (or the valid base input becomes rejected), writing mutants and logs to
    the configured mutation directory.

    argv[1] -- path to the subject script.
    argv[2] -- optional pickle file with (mutated, valid) string pairs.
    argv[3] -- optional timeout in seconds for traced executions.
    seed    -- optional RNG seed for reproducibility.

    Fixes relative to the original revision:
      * argv[3] is cast to int (it is later combined with ints via max()),
      * the rejected-strings pickle handle is closed via a context manager,
      * the retry branch referenced the undefined name ``pstate``
        (NameError); it now re-queues the parent's ``pmstate``,
      * ``diff_err`` is initialised per iteration (it was unbound whenever
        the mutated string was not rejected),
      * mixed tab/space indentation (a TabError on Python 3) is normalised,
      * a Timeout while tracing the base input now exits cleanly instead of
        falling through to unbound names.
    """
    random.seed(seed)
    global current_config
    current_config = get_default_config()
    arg = argv[1].replace("\\", "/")
    ar1 = arg
    orig_file = ar1
    # Derive the mutant directory name from the script's basename.
    mut_dir = arg[arg.rfind("/")+1:arg.rfind(".")] if arg.rfind("/") >= 0 else arg[:arg.rfind(".")]
    script_name = mut_dir
    mut_dir = (current_config["default_mut_dir"]+"/").replace("//", "/") + mut_dir + "/"
    # Add script's directory to path
    sys.path.insert(0, mut_dir)
    # Store the reason why the mutation was completed
    mutants_with_cause = []
    # Timeout since our modifications may cause infinite loops
    timeout = int(current_config["min_timeout"]) if len(argv) < 4 or not argv[3] else int(argv[3])
    if not os.path.exists(mut_dir):
        os.makedirs(mut_dir)
    else:
        cleanup(mut_dir)

    # Index of the string currently processed
    str_cnt = 0
    # Mutation counter
    mut_cnt = 0
    pick_file = argv[2] if len(argv) > 2 else current_config["default_rejected"]
    with open(pick_file, 'rb') as pick_handle:
        rej_strs = pickle.load(pick_handle)
    if not rej_strs:
        raise SystemExit("File: " + pick_file + " contains no inputs.")

    # Precompute the locations of conditions and the lines of their then and
    # else case and format the file properly.
    global manual_errs
    manual_errs = argtracer.compute_base_ast(ar1, mut_dir + script_name + ".py")
    ar1 = mut_dir + script_name + ".py"

    # Record how long the slowest execution takes to have a better prediction
    # of the required timeout.
    slowest_run = 0
    # Get base values from the non-crashing run with the most conditions traversed
    progress = 1
    base_conds = []
    ln_cond = -1
    for cand in rej_strs:
        pos = 0
        print("Mutated string:", repr(cand[0]), flush=True)
        print("Valid string:", repr(cand[1]), flush=True)
        base_index = 0
        for str_inpt in cand:
            start_time = timer()
            try:
                print("Tracing:", progress, "/", 2*len(rej_strs), flush=True)
                (_, base_cond, _, someerror) = argtracer.trace(ar1, str_inpt)
                if pos == 1:
                    base_conds.append(base_cond)
                    if len(base_cond) > ln_cond:
                        basein = cand[1]
                        base_pos = base_index
                        ln_cond = len(base_cond)
                    if someerror:
                        raise SystemExit("Invalid input: " + repr(str_inpt) + ".\nAborted.")
                    base_index += 1
            finally:
                pos += 1
                time_elapsed = timer() - start_time
                if time_elapsed > slowest_run:
                    slowest_run = time_elapsed
                progress += 1
    # Choose a timeout that is very likely to let valid mutants finish
    timeout = max(timeout, int(int(current_config["timeout_slow_multi"])*slowest_run)+1)
    try:
        (_, b_cdict, _, err) = argtracer.trace(ar1, basein, timeout=timeout)
    except Timeout:
        print("Execution timed out on basestring! Try increasing timeout (currently", timeout, " seconds)")
        # b_cdict/err are unbound after a timeout; the original fell through
        # to a NameError here -- exit explicitly instead.
        raise SystemExit(1)

    if err:
        raise SystemExit("Exiting: " + pick_file + " contains no valid inputs for " + ar1)

    # Remove duplicates (same condition trace) from valid inputs
    idxl = 0
    while idxl < len(base_conds):
        idxr = idxl + 1
        while idxr < len(base_conds):
            if get_frozen(base_conds[idxl]) == get_frozen(base_conds[idxr]):
                del base_conds[idxr]
            else:
                idxr += 1
        idxl += 1

    print("Amount of unique base strings:", len(base_conds), flush=True)

    print("Used baseinput:", repr(basein))

    # Log the inputs since they are determined already
    input_log = LogWriter(mut_dir[:-1] + "_inputs.log")
    for i in range(len(rej_strs)):
        input_log.append_line(str(i) + ": " + repr(rej_strs[i][0]) + "\n")
    input_log.append_line("The baseinput was: " + repr(basein))

    lwriter = LogWriter(mut_dir[:-1] + ".log")
    lwriter.append_line("Mutating script: " + repr(orig_file) + "\n")

    all_generated = {int_key: [] for int_key in range(len(base_conds))}

    # Run the mutation process for every rejected string
    for s in rej_strs:
        s = s[0]
        if int(current_config["variable_base"]) == 0:
            # NOTE(review): base_index here is whatever the tracing loop left
            # behind -- presumably the index of the longest base trace; confirm.
            queue = [(ar1, [], 0, None, None, None, base_index)]
        else:
            queue = []
            for base_index in range(len(base_conds)):
                queue.append((ar1, [], 0, None, None, None, base_index))
        discarded = set()
        # Save which exception the first execution of the rejected string produced
        original_ex_str = None
        # Stores which exceptions the valid string caused
        except_set = set()
        # The set of final lines observed by mutants rejecting the valid string
        rej_sigs = set()
        while queue:
            (arg, history, retries, pidx, pmstate, scstate, b_cindex) = queue.pop(0)
            skip = False
            b_cdict = base_conds[b_cindex]
            print("Current script:", arg, flush=True)
            # Check whether the chosen correct string is now rejected
            try:
                _mod = imp.load_source('mymod', arg)
            except:
                # Broad on purpose: any import failure discards this mutant.
                print("Discarded script:", arg, "(import error)", flush=True)
                os.remove(arg)
                continue
            print("Executing basestring...", flush=True)
            try:
                (lines, _, _, berr) = argtracer.trace(arg, basein, timeout=timeout)
            except argtracer.Timeout:
                print("Discarding:", arg, "(basestring timed out)", flush=True)
                os.remove(arg)
                continue

            # Remove lines used to construct custom exceptions
            lines = manual_errs.remove_custom_lines(lines)

            # If the crash happens on a condition we modified there is a high
            # chance it's invalid, so we remove it.
            if lines[0] in history:
                print("Removed:", arg, "(potentially corrupted condition)", flush=True)
                os.remove(arg)
                continue

            # Mutation guided by rejected strings

            try:
                (lines, cdict, _, err) = argtracer.trace(arg, s, timeout=timeout)
            except:
                # Broad on purpose: any failure while tracing the mutated
                # string discards this mutant.
                print("Discarding:", arg, "(mutated string timed out)", flush=True)
                os.remove(arg)
                continue

            # Remove lines used to construct custom exceptions
            lines = manual_errs.remove_custom_lines(lines)
            # If the crash happens on a condition we modified there is a high
            # chance it's invalid, so we remove it.
            if lines[0] in history:
                print("Removed:", arg, "(potentially corrupted condition)", flush=True)
                os.remove(arg)
                continue

            if original_ex_str is None:
                if err == False:
                    print("Skipping string:", s, "(not rejected)!", flush=True)
                    continue
                else:
                    original_ex_str = str(err.__class__)

            # Check whether the modification changed the condition state
            skip = pmstate is not None and cdict.get(history[-1]) is not None and cdict.get(history[-1]) == pmstate

            if skip:
                print("Removed:", arg, "(unsuccessful modification)", flush=True)
                if retries < int(current_config["mut_retries"]) and pidx:
                    # Try again with a fresh mutation of the same condition.
                    full_str = manual_errs.get_if_from_line(history[-1], ar1)
                    cond_str = full_str[full_str.find("if")+3:full_str.rfind(":")]
                    inpt_ast = ast.fix_missing_locations(ast.parse(cond_str))
                    mtrans = MutTransformer(pidx)
                    res = mtrans.visit(inpt_ast)
                    fix = full_str[:full_str.find("if")+2] + " " + astunparse.unparse(res).lstrip().rstrip() + ":"
                    if not fix.endswith("\n"):
                        fix = fix + "\n"
                    # (The re-queue below was nested under the newline check in
                    # the original; ``fix`` always ends with ":" there, so the
                    # check always fired and this dedent is equivalent.)
                    mods = {history[-1]: fix}
                    cand = mut_dir + script_name + "_" + str(str_cnt) + "_" + str(mut_cnt) + ".py"
                    # BUGFIX: the original passed the undefined name ``pstate``
                    # here; re-queue the parent's pmstate instead.
                    queue.insert(0, (cand, history.copy(), retries+1, pidx, pmstate, None, b_cindex))
                    file_copy_replace(cand, arg, mods)
                    mut_cnt += 1
                elif retries >= int(current_config["mut_retries"]):
                    print("Retries exceeded:", arg, flush=True)
                os.remove(arg)
                continue

            sskip = (scstate is not None and cdict.get(history[-1]) is not None and cdict.get(history[-1]) == scstate)
            # Retries would be possible here as well, but since our search is
            # blind for these conditions it's skipped.
            if sskip:
                print("Removed:", arg, "(unsuccessful modification) (sec)", flush=True)
                os.remove(arg)
                continue

            if berr and (lines[0] not in rej_sigs or berr not in except_set):
                print("Mutation complete:", arg, "(base rejected)", flush=True)
                print("Exception for base on", arg, ":", repr(berr), flush=True)
                mutants_with_cause.append((arg, "valid string rejected"))
                lwriter.append_line(repr(mutants_with_cause[-1]) + "\n")

            (prim, sec) = get_left_diff(cdict, b_cdict)
            # Remove all elements that have been explored (history) or do not
            # belong to the actual code (i.e. error constructor - lines).
            prim = [e for e in prim if e[0] not in history and e[0] in lines]
            sec = [e for e in sec if e[0] not in history and e[0] in lines] if int(current_config["blind_continue"]) else []

            # Don't create mutants if their line combination is already in the queue
            prim = [] if not prim else rm_dups(prim, history, all_generated, b_cindex)

            # Sec will never be progressed if prim is not empty
            sec = [] if not sec or len(prim) > 0 else rm_dups(sec, history, all_generated, b_cindex)

            print("Used string:", repr(s), flush=True)
            print("Queue length:", len(queue), flush=True)
            print("Change history:", history, flush=True)
            print("Difference to base (flipped):", prim, flush=True)
            print("Difference to base (new):", sec, flush=True)
            print("Final line:", str(lines[0]), flush=True)
            print("", flush=True)
            # BUGFIX: initialise diff_err; it was unbound whenever err was falsy.
            diff_err = False
            if err:
                # Check whether the exception is different from the first encountered one
                diff_err = str(err.__class__) != original_ex_str
                err = True
            print("Mutated string rejected:", err, "different:", diff_err, flush=True)
            if (err and not diff_err) or int(current_config["early_stop"]) == 0:
                all_fixes = get_possible_fixes((prim, sec), arg)
                if all_fixes:
                    for (fix_list, fix_line, pstate, sstate) in all_fixes:
                        # Create a mutant for every possible fix
                        for (fix, permindex) in fix_list:
                            if not fix.endswith("\n"):
                                fix = fix + "\n"
                            cand = mut_dir + script_name + "_" + str(str_cnt) + "_" + str(mut_cnt) + ".py"
                            mods = {fix_line: fix}
                            queue.insert(0, (cand, history.copy()+[fix_line], 0, permindex, pstate, sstate, b_cindex))
                            file_copy_replace(cand, arg, mods)
                            mut_cnt += 1
            # Check whether the mutant is valid (rejects base or accepts
            # mutated string) and record its behaviour.
            if arg != ar1:
                if not err or diff_err:
                    print("Mutation complete:", arg, "(mutated string accepted)", flush=True)
                    mutants_with_cause.append((arg, "mutated string accepted"))
                    lwriter.append_line(repr(mutants_with_cause[-1]) + "\n")
                elif not berr or (berr and (lines[0] in rej_sigs and berr in except_set)):
                    discarded.add(arg)
                    rej_sigs.add(lines[0])
                    except_set.add(berr)

        # Don't delete the original script, we need it to create mutants from
        # whenever a new rejected string is processed.
        discarded.discard(ar1)
        # Remove all scripts that neither reject the base string nor accept the mutated string
        for scrpt in discarded:
            print("Removed:", scrpt, flush=True)
            os.remove(scrpt)
        # Adjust the file naming
        str_cnt += 1
        mut_cnt = 0
        print("Processing string number:", str(str_cnt), "/", str(len(rej_strs)), flush=True)
    # Move the copy of the original script since it is not a mutant
    orig_out = current_config["default_mut_dir"] + ar1[ar1.rfind("/")+1:]
    if os.path.exists(orig_out):
        os.remove(orig_out)
    os.rename(ar1, orig_out)
    print("Done. The final mutants are in:", mut_dir)
    # Remove duplicates and update the log accordingly
    mutants_with_cause = remove_duplicates(mut_dir, ".py", mutants_with_cause)

    lwriter = LogWriter(mut_dir[:-1] + ".log")
    lwriter.append_line("Mutating script: " + repr(orig_file) + "\n")
    for e in mutants_with_cause:
        lwriter.append_line(repr(e) + "\n")
def main(argv):
    """Run the full mutation pipeline for a subject script.

    Expected positional arguments (argv[0] is ignored; trailing optional
    arguments may be empty strings or omitted entirely):
        1: subject script; '.py' is appended when missing (required)
        2: binary file with rejected inputs ('' -> generate them first)
        3: time limit in seconds for input generation
        4: timeout in seconds for mutant execution
        5: random seed
        6: file containing known valid strings
        7: optional filter script used to pre-select generated inputs
    Empty/missing optional arguments fall back to the config defaults.
    """
    global current_config
    current_config = get_default_config()

    def _arg(idx):
        # Safe positional access: a missing trailing argument behaves exactly
        # like an empty string (previously this raised IndexError).
        return argv[idx] if len(argv) > idx else ""

    if not _arg(1):
        raise SystemExit("Please specify the script name!")
    prog = argv[1] if argv[1].endswith(".py") else argv[1] + ".py"
    raw_binfile = _arg(2)
    binfile = raw_binfile if raw_binfile else "rejected_" + prog[prog.rfind("/")+1:prog.rfind(".py")] + ".bin"
    # The config defaults are ints, so user-supplied values are converted too.
    # (Previously a user value stayed a string, yielding a mixed-type
    # time limit/timeout depending on how the script was invoked.)
    timelimit = int(_arg(3)) if _arg(3) else int(current_config["default_gen_time"])
    timeout = int(_arg(4)) if _arg(4) else int(current_config["min_timeout"])
    seed = _arg(5) or None
    valid_file = _arg(6) or None
    filter_py = _arg(7) or None
    if filter_py:
        filter_py = filter_py if filter_py.endswith(".py") else filter_py + ".py"

    if seed is None:
        random.seed()
        # A random 32 bit integer
        seed = random.randrange(2**31-1)

    # Generate inputs in case no binary file is supplied
    print_step_time('"gen"')
    if not raw_binfile:
        tprog = filter_py if filter_py else prog
        instr_code = rewrite_ast(tprog)
        target_loc = tprog[:-3] + "_instr.py"
        with open(target_loc, "w", encoding="UTF-8") as inst_out:
            inst_out.write(instr_code)
        print("Generating inputs for:", tprog, "...", flush=True)
        gen([None, target_loc, timelimit, binfile, valid_file], seed)
    # Otherwise use the given inputs
    else:
        print("Using inputs from:", binfile, flush=True)

    if filter_py:
        print("Filtering inputs...", flush=True)
        convert_with_filter(prog, binfile, binfile)

    print_step_time('"gen"')

    print("Starting mutation...", prog, "(Timestamp: '" + str(datetime.datetime.now()) + ", seed: " + str(seed) + "')", flush=True)
    # Run the mutation algorithm
    print_step_time('"mutation"')
    mutate([None, prog, binfile, timeout], seed)
    print_step_time('"mutation"')
    # Check whether the results are fine and remove potentially problematic scripts
    if int(current_config["quick_check"]) == 0:
        print("Testing result integrity...", flush=True)
        print_step_time('"verify"')
        check([None, prog, binfile, True])
        print_step_time('"verify"')
    # Run the program's test suite
    print("Running unit tests...", flush=True)
    print_step_time('"run tests"')
    run_tests([None, current_config["default_mut_dir"] + prog[prog.rfind("/")+1:-3]+"/"])
    print_step_time('"run tests"')
    # Only check mutants that fail no tests as the others are non-equivalent by definition
    if int(current_config["quick_check"]) > 0:
        print("Testing result integrity...", flush=True)
        print_step_time('"verify"')
        check([None, prog, binfile, True])
        print_step_time('"verify"')
    print()
    print("Done.", "(Timestamp: '" + str(datetime.datetime.now()) + "')", flush=True)
#!/usr/bin/env python3
import sys
import pickle
import imp
from config import get_default_config
from generate_reject import bitflip, byteflip, trim, delete, insert, swap, RandomizedList
import random
import taintedstr

current_config = get_default_config()


def main(args):
    # Generate rejected input strings
    if len(args) < 4:
        raise SystemExit(
            "Please specify: 'subject.py' 'output_file.bin' 'at least one valid string' as parameters"
        )
    t_prog = args[1]
    # Adjust path to allow imports
    subpath = ""
    path_lst = t_prog.split("/")[:-1]
    for ele in path_lst:
        if subpath:
            subpath = subpath + ele + "/"
        else:
            subpath = ele + "/"
        sys.path.insert(0, subpath)
    outfile = args[2]
    resl = []
    for i in range(3, len(sys.argv)):
Beispiel #20
0
def main():
    """Parse CLI options, build the data manager, model, optimizer,
    scheduler and engine, then run training or evaluation."""
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('--config-file', type=str, default='',
                     help='path to config file')
    cli.add_argument('-s', '--sources', type=str, nargs='+',
                     help='source datasets (delimited by space)')
    cli.add_argument('-t', '--targets', type=str, nargs='+',
                     help='target datasets (delimited by space)')
    cli.add_argument('--transforms', type=str, nargs='+',
                     help='data augmentation')
    cli.add_argument('--root', type=str, default='',
                     help='path to data root')
    cli.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                     help='Modify config options using the command-line')
    parsed = cli.parse_args()

    # Assemble the configuration: defaults -> file -> CLI arguments -> opts.
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if parsed.config_file:
        cfg.merge_from_file(parsed.config_file)
    reset_config(cfg, parsed)
    cfg.merge_from_list(parsed.opts)
    set_random_seed(cfg.train.seed)
    check_cfg(cfg)

    # Mirror stdout into a timestamped log file in the save directory.
    log_name = ('test.log' if cfg.test.evaluate else 'train.log') \
        + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    # Optionally load pretrained weights before any GPU wrapping.
    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)
    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(cfg))

    # Resume training state (epoch, weights, optimizer, scheduler) if requested.
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler)

    print('Building {}-engine for {}-reid'.format(cfg.loss.name,
                                                  cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
Beispiel #21
0
from cliff.api import Cliff

# Version of the CLIFF geoparser this web app wraps.
VERSION = "2.6.1"

MAX_CHARS = 250  # limit the amount of text users can send in

app = Flask(__name__)

# setup logging
logging.basicConfig(level=logging.WARN)
log = logging.getLogger(__file__)
log.info(
    "---------------------------------------------------------------------------"
)

# Application settings (e.g. the CLIFF server URL) come from the default config.
app_config = config.get_default_config()

# set up the api client we will use
CLIFF_URL = app_config.get('CLIFF_URL')
cliff = Cliff(CLIFF_URL)
cliff.PARSE_TEXT_PATH = "/cliff/parse/text"  # instead of "/cliff-2.6.1/parse/text"


# render the homepage
@app.route("/")
def index():
    """Serve the landing page, exposing the wrapped CLIFF version."""
    page = render_template('home.html', version=VERSION)
    return page


# return json results from CLIFF
@app.route("/process", methods=['POST'])
Beispiel #22
0
from flask import Flask, render_template, request

from cliff.api import Cliff

# Version of the CLIFF geoparser this web app wraps.
VERSION = "2.3.0"

MAX_CHARS = 250  # limit the amount of text users can send in

app = Flask(__name__)

# setup logging
logging.basicConfig(level=logging.WARN)
log = logging.getLogger(__file__)
log.info("---------------------------------------------------------------------------")

# Application settings (e.g. the CLIFF server URL) come from the default config.
app_config = config.get_default_config()

# set up the api client we will use
CLIFF_URL = app_config.get('CLIFF_URL')
cliff = Cliff(CLIFF_URL)


# render the homepage
@app.route("/")
def index():
    """Render the landing page template with the service version."""
    return render_template(
        'home.html',
        version=VERSION,
    )


# return json results from CLIFF 
@app.route("/process",methods=['POST'])
def geoparse():
Beispiel #23
0
    def __init__(self):
        """Construct the main annotation window.

        Sets up the dock widgets (label lists and tag/color table), the
        central view, all user actions, the toolbar and menus, and finally
        wires widget signals before calling init()/initViewSlot().
        """
        super(MainWindow, self).__init__()
        self.setAcceptDrops(True)

        # Core state: active file loader, unsaved-changes flag, and a guard
        # used to block selection-slot feedback loops.
        self.loader = None
        self.dirty = False
        self._selectSlotBlock = False
        self._config = config.get_default_config()

        # --- dock widgets --------------------------------------------------
        # Dock showing only the labels currently displayed.
        self.labelListWidget = LabelListWidget(self)
        self.labelListDock = QtWidgets.QDockWidget('Displayed Label List',
                                                   self)
        self.labelListDock.setWidget(self.labelListWidget)

        # Dock listing every label.
        self.allLabelList = LabelListWidget(self)
        self.allLabelListDock = QtWidgets.QDockWidget('Label List', self)
        self.allLabelListDock.setWidget(self.allLabelList)

        # Dock with the tag/color table, populated from config['tags'].
        self.colorTableWidget = TaglistWidget(self)
        self.colorTableWidget.loadFromJson(self._config['tags'])
        self.colorTableDock = QtWidgets.QDockWidget('Color Table', self)
        self.colorTableDock.setWidget(self.colorTableWidget)

        # Stack the two label docks as tabs on the right side.
        self.addDockWidget(Qt.RightDockWidgetArea, self.allLabelListDock)
        self.addDockWidget(Qt.RightDockWidgetArea, self.colorTableDock)
        self.tabifyDockWidget(self.allLabelListDock, self.labelListDock)

        # --- central view --------------------------------------------------
        self.view = BaseView()
        self.setCentralWidget(self.view)
        self.statusBar().show()
        self.resize(1200, 800)

        # --- actions -------------------------------------------------------
        # Action factory pre-bound to this window (see utils.createAction).
        action = functools.partial(utils.createAction, self)
        open_ = action("&Open", self.open, 'open',
                       self.tr('Open image or label file'))
        exit_ = action("&Exit", tip=self.tr('Quit Application'))
        # NOTE(review): openDir_ reuses self.open as its slot — confirm a
        # dedicated directory-open handler is not intended here.
        openDir_ = action("&Open Dir", self.open, 'open')
        createMode_ = action("&Create Polygons",
                             lambda: self.toggleDrawMode(type.Mode_polygon),
                             'objects', self.tr('Start drawing polygons'))

        # Shape-drawing modes; all start disabled and are enabled later.
        createRectangleMode_ = action(
            self.tr('Create Rectangle'),
            lambda: self.toggleDrawMode(type.Mode_rectangle),
            'objects',
            self.tr('Start drawing rectangles'),
            enabled=False,
        )
        createCircleMode_ = action(
            self.tr('Create Circle'),
            lambda: self.toggleDrawMode(type.Mode_circle),
            'objects',
            self.tr('Start drawing circles'),
            enabled=False,
        )
        createLineMode_ = action(
            self.tr('Create Line'),
            lambda: self.toggleDrawMode(type.Mode_line),
            'objects',
            self.tr('Start drawing lines'),
            enabled=False,
        )
        createPointMode_ = action(
            self.tr('Create Point'),
            lambda: self.toggleDrawMode(type.Mode_point),
            'objects',
            self.tr('Start drawing points'),
            enabled=False,
        )
        createLineStripMode_ = action(
            self.tr('Create LineStrip'),
            lambda: self.toggleDrawMode(type.Mode_linestrip),
            'objects',
            self.tr('Start drawing linestrip. Ctrl+LeftClick ends creation.'),
            enabled=False,
        )
        createBoxMode_ = action(
            self.tr('Create Box'),
            lambda: self.toggleDrawMode(type.Mode_box),
            'objects',
            self.tr('Start drawing box.'),
            enabled=False,
        )

        delete_ = action(self.tr('Delete Polygons'),
                         self.deleteSelectedShape,
                         'cancel',
                         self.tr('Delete the selected polygons'),
                         enabled=False)

        # NOTE(review): deleteAll_ is wired to deleteSelectedShape, same as
        # delete_ — confirm it should not call a delete-all handler instead.
        deleteAll_ = action(self.tr('Delete All'),
                            self.deleteSelectedShape,
                            'delete',
                            self.tr('Delete all polygons'),
                            enabled=False)

        edit_ = action('&Edit Label',
                       lambda: self.toggleDrawMode(None),
                       'edit',
                       'Modify the label of the selected polygon',
                       enabled=False)

        save_ = action(self.tr('&Save'),
                       self.saveFile,
                       'save',
                       self.tr('Save labels to file'),
                       enabled=False)

        saveAs_ = action(self.tr('&Save As'),
                         self.saveFileAs,
                         'save-as',
                         self.tr('Save labels to a different file'),
                         enabled=False)

        # Tag navigation / editing actions delegate to the color table widget.
        nextTag_ = action(
            self.tr('&Next Tag'),
            slot=lambda: self.colorTableWidget.selectNext(),
            tip=self.tr('Go to a next tag'),
        )
        prevTag_ = action(
            self.tr('&Previous Tag'),
            slot=lambda: self.colorTableWidget.selectPrev(),
            tip=self.tr('Go to a previous tag'),
        )
        homeTag_ = action(
            self.tr('&Home Tag'),
            slot=lambda: self.colorTableWidget.selectHome(),
            tip=self.tr('Go to a start tag'),
        )
        endTag_ = action(
            self.tr('&End Tag'),
            slot=lambda: self.colorTableWidget.selectEnd(),
            tip=self.tr('Go to a end tag'),
        )
        deleteTag_ = action(
            self.tr('&Delete Tag'),
            slot=lambda: self.colorTableWidget.deleteSelected())
        addTag_ = action(self.tr('&Add Tag'),
                         slot=lambda: self.colorTableWidget.addTag())
        insertTag_ = action(self.tr('&Insert Tag'),
                            slot=lambda: self.colorTableWidget.insertTag())

        fitWindow_ = action(self.tr('&Fit Window'),
                            slot=self.fitWindow,
                            icon='fit-window',
                            tip=self.tr('Zoom follows window size'))

        undo_ = action(self.tr('&Undo'),
                       slot=self.undo,
                       icon='undo',
                       tip=self.tr('undo'))

        # Zoom spinbox embedded into the toolbar via a QWidgetAction.
        self.zoom_widget = ZoomWidget()
        zoom_ = QtWidgets.QWidgetAction(self)
        zoom_.setDefaultWidget(self.zoom_widget)

        # --- action registry and menu layouts ------------------------------
        # utils.struct bundles the actions plus the tuples that define each
        # menu's ordering (None entries become separators).
        self.actions = utils.struct(
            open=open_,
            exit=exit_,
            openDir=openDir_,
            createMode=createMode_,
            createRectangleMode=createRectangleMode_,
            createCircleMode=createCircleMode_,
            createLineMode=createLineMode_,
            createPointMode=createPointMode_,
            createLineStripMode=createLineStripMode_,
            createBoxMode=createBoxMode_,
            edit=edit_,
            delete=delete_,
            save=save_,
            saveAs=saveAs_,
            nextTag=nextTag_,
            prevTag=prevTag_,
            homeTag=homeTag_,
            endTag=endTag_,
            deleteTag=deleteTag_,
            fitWindow=fitWindow_,
            deleteAll=deleteAll_,
            undo=undo_,
            # load=load_,
            fileMenu=(
                open_,
                openDir_,
                None,
                save_,
                saveAs_,
                None,
                # load_,
                None,
                exit_,
            ),
            editMenu=(
                createMode_,
                createRectangleMode_,
                createCircleMode_,
                createLineMode_,
                createPointMode_,
                createLineStripMode_,
                createBoxMode_,
                None,
                edit_,
                undo_,
                None,
                delete_,
                deleteAll_,
            ),
            selectionMenu=(
                nextTag_,
                prevTag_,
                homeTag_,
                endTag_,
            ),
            viewMenu=(
                self.labelListDock.toggleViewAction(),
                self.allLabelListDock.toggleViewAction(),
                self.colorTableDock.toggleViewAction(),
                None,
                fitWindow_,
            ),
            labelListMenu=(delete_, ),
            tagListMenu=(
                addTag_,
                insertTag_,
                deleteTag_,
            ))

        # --- toolbar and menus ---------------------------------------------
        self.tools_actions = (
            open_,
            openDir_,
            save_,
            saveAs_,
            None,
            delete_,
            deleteAll_,
            undo_,
            None,
            createMode_,
            edit_,
            None,
            zoom_,
            fitWindow_,
        )
        self.toolbar = ToolBar('toolbar')
        self.toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        self.addToolBar(Qt.TopToolBarArea, self.toolbar)
        utils.addActions(self.toolbar, self.tools_actions)
        # utils.addActions(self.canvas.menu, self.actions.editMenu)
        self.view.addMenu(self.actions.editMenu)
        self.labelListWidget.addMenu(self.actions.labelListMenu)
        self.allLabelList.addMenu(self.actions.labelListMenu)
        self.colorTableWidget.addMenu(self.actions.tagListMenu)

        self.addMenu("&File", self.actions.fileMenu)
        self.addMenu("&Edit", self.actions.editMenu)
        self.addMenu("&Selection", self.actions.selectionMenu)
        self.addMenu("&View", self.actions.viewMenu)
        # signal

        # --- signal wiring -------------------------------------------------
        self.zoom_widget.valueChanged.connect(self.zoomChanged)
        self.labelListWidget.itemChanged.connect(self.labelItemChanged)
        # Double-click leaves draw mode before editing the selection.
        self.labelListWidget.itemDoubleClicked.connect(
            lambda: (self.toggleDrawMode(None), self.labelSelectionChanged()))
        self.labelListWidget.itemSelectionChanged.connect(
            self.labelSelectionChanged)
        self.labelListWidget.itemDeleteSelected.connect(
            self.deleteSelectedShape)
        self.allLabelList.itemSelectionChanged.connect(
            self.allLabelListSelected)
        self.allLabelList.itemDoubleClicked.connect(self.editLabel)

        self.colorTableWidget.itemsDelete.connect(self.tagsDelete)

        # Final setup hooks defined elsewhere on this class.
        self.init()
        self.initViewSlot()
Beispiel #24
0
from skimage.draw import circle
from skimage.color import rgb2grey

import config as c
import detect_arrowheads_new
import detect_lines
import detect_segments_cnn
from matplotlib import pyplot as plt, patches
import timeit
import numpy as np

import detect_segments_nb
import img_preprocessor
import optimize_pil

# Shared detector configuration, loaded once at module import time.
config = c.get_default_config()

# Number of benchmark repetitions.
RUNS = 1

# Partial timings collected per pipeline stage ("Teilzeiten").
sub_total = []
sub_arrowhead = []
sub_lines = []
sub_segments = []
sub_combine = []
sub_preprocessing = []


class MyBuilder(pyocr.builders.TextBuilder):
    """Thin subclass of pyocr's TextBuilder using the default construction."""

    def __init__(self):
        super().__init__()
Beispiel #25
0
            lang_embeds = model.compute_lang_embed(q, rank)
            s = model.compute_similarity_on_frame(track, lang_embeds, rank)
            track_id = track["id"][0]
            track_score[track_id] = s
        top_tracks = sorted(track_score, key=track_score.get, reverse=True)
        with open(
                os.path.join(eval_cfg.LOG_DIR, eval_cfg.EXPR_NAME, "logs",
                             "%s.log" % query_id), "w") as f:
            for track in top_tracks:
                f.write("%s\n" % track)
    _logger.info("FINISHED.")


if __name__ == "__main__":
    FLAGS(sys.argv)
    cfg = get_default_config()
    cfg.merge_from_file(FLAGS.config_file)
    cfg.NUM_GPU_PER_MACHINE = FLAGS.num_gpus
    cfg.NUM_MACHINES = FLAGS.num_machines
    cfg.LOCAL_MACHINE = FLAGS.local_machine
    cfg.WORLD_SIZE = FLAGS.num_machines * FLAGS.num_gpus
    cfg.EXPR_NAME = cfg.EXPR_NAME + "_" + datetime.now().strftime(
        "%m_%d.%H:%M:%S.%f")
    cfg.INIT_METHOD = "tcp://%s:%d" % (FLAGS.master_ip, FLAGS.master_port)
    if cfg.TYPE == "TRAIN":
        mp.spawn(train_model_on_dataset,
                 args=(cfg, ),
                 nprocs=cfg.NUM_GPU_PER_MACHINE,
                 join=True)
    elif cfg.TYPE == "EVAL":
        with open(cfg.EVAL.QUERY_JSON_PATH, "r") as f:
Beispiel #26
0
def app(ctx, profile, alt_config, config_values, alt_data_dir, log_config,
        bootstrap_node, log_json, mining_pct, unlock, password, log_file):
    """CLI entry point: assemble the node configuration and stash it on ctx.

    Configuration precedence (lowest to highest): service defaults ->
    selected profile -> config file / data-dir config -> command-line
    "a.b.c=d" overrides -> explicit flags (bootstrap node, mining).
    The final config, unlock list, password and log file are stored in
    ctx.obj for downstream commands.
    """
    # configure logging
    slogging.configure(log_config, log_json=log_json, log_file=log_file)

    # data dir default or from cli option
    alt_data_dir = os.path.expanduser(alt_data_dir)
    data_dir = alt_data_dir or konfig.default_data_dir
    konfig.setup_data_dir(
        data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
        if not config:
            log.warning(
                'empty config given. default config values will be used')
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # Store custom genesis to restore if overridden by profile value
    genesis_from_config_file = config.get('eth', {}).get('genesis')

    # Store custom bootstrap_nodes to restore them overridden by profile value
    bootstrap_nodes_from_config_file = config.get('discovery',
                                                  {}).get('bootstrap_nodes')

    # add default config
    konfig.update_config_with_defaults(
        config, konfig.get_default_config([EthApp] + services))

    konfig.update_config_with_defaults(
        config, {'eth': {
            'block': blocks.default_config
        }})

    # Set config values based on profile selection
    merge_dict(config, PROFILES[profile])

    if genesis_from_config_file:
        # Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
        del config['eth']['genesis_hash']
        config['eth']['genesis'] = genesis_from_config_file

    if bootstrap_nodes_from_config_file:
        # Fixed bootstrap_nodes taken from profile must be deleted as custom bootstrap_nodes loaded
        del config['discovery']['bootstrap_nodes']
        config['discovery'][
            'bootstrap_nodes'] = bootstrap_nodes_from_config_file

    # Remember the genesis value before CLI overrides so we can detect a change.
    pre_cmd_line_config_genesis = config.get('eth', {}).get('genesis')
    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
        except ValueError:
            raise BadParameter(
                'Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                'specifies the parameter to set and d is a valid yaml value '
                '(example: "-c jsonrpc.port=5000")')

    if pre_cmd_line_config_genesis != config.get('eth', {}).get('genesis'):
        # Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
        if 'genesis_hash' in config['eth']:
            del config['eth']['genesis_hash']

    # Load genesis config
    konfig.update_config_from_genesis_json(
        config, genesis_json_filename_or_dict=config['eth']['genesis'])
    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    # Mining disabled -> deactivate the PoW service entirely.
    if not config.get('pow', {}).get('activated'):
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {
        'config': config,
        'unlock': unlock,
        'password': password.read().rstrip() if password else None,
        'log_file': log_file
    }
    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # raising an error would be safer for validating the password file.
    assert (password and ctx.obj['password'] is not None and len(
        ctx.obj['password'])) or not password, "empty password file"
Beispiel #27
0
def main(argv, qc=None):
    global current_config
    current_config = get_default_config()
    # Specify the original name of the script to check the results.
    # Uses the second argument as binary input file, or the config value in case it is omitted.
    # The optional third argument controls whether the unverifiable scripts are to be removed.
    if len(argv) < 2:
        raise SystemExit("Please specify the script name!")

    base_dir = TidyDir("", guess=False)

    scriptname = argv[1] if not argv[1].endswith(
        ".py") else argv[1][:argv[1].rfind(".py")]
    original_file = scriptname + ".py"
    script_base_name = original_file[original_file.rfind("/") + 1:]
    (sub_dir, scrpt) = base_dir.split_path(scriptname)
    if scriptname.rfind("/"):
        scriptname = scriptname[scriptname.rfind("/") + 1:]
    cause_file = str(TidyDir(
        current_config["default_mut_dir"])) + scriptname + ".log"
    test_log = str(TidyDir(
        current_config["default_mut_dir"])) + scriptname + "_test_results.log"
    inputs_file = current_config["default_rejected"] if len(
        argv) < 3 else argv[2]
    clean_invalid = int(
        current_config["default_clean_invalid"]) if len(argv) < 4 else argv[3]
    all_inputs = []
    all_mutants = []
    behave = {}
    mutant_to_cause = {}
    num_workers = int(current_config["test_threads"])
    run_seq = []

    with open(cause_file, "r", encoding="UTF-8") as causes:
        for num, line in enumerate(causes):
            # Get the path to the original script
            if num > 0:
                # Use eval to get the pair representation of the line. The first element is the mutant.
                the_mutant = eval(line)[0]
                the_mutant = the_mutant.replace("//", "/")
                if not os.path.exists(the_mutant):
                    raise SystemExit("Could not find file: '" + the_mutant +
                                     "'.\nLog file: '" + cause_file +
                                     "' is corrupted.")
                adj_dir = base_dir if base_dir else scriptname[:scriptname.
                                                               rfind("/")]
                effect_set = mutant_to_cause.get(
                    the_mutant) if mutant_to_cause.get(the_mutant) else set()
                # Code mutant behaviour as integer for easy comparison
                if eval(line)[1].find("rejected") > -1:
                    effect_set.add(0)
                else:
                    effect_set.add(1)
                mutant_to_cause[the_mutant] = effect_set
                if the_mutant not in all_mutants:
                    all_mutants.append(the_mutant)

    qc = qc if qc is not None else int(current_config["quick_check"])
    if qc > 0:
        all_mutants = []
        top_page = []
        with open(test_log, "r", encoding="UTF-8") as fl:
            lst = fl.read().split("\n")
        for idx in range(0, len(lst), 3):
            e = lst[idx][:-1]
            if lst[idx + 1].rstrip().endswith(r"Fail: 0"):
                if not os.path.exists(e):
                    raise SystemExit("Cannot find file:" + e)
                all_mutants.append(e)
                top_page.append(lst[idx])
                top_page.append(lst[idx + 1])
                top_page.append(lst[idx + 2])
            elif re.match(r"-+", lst[idx]) or re.findall(
                    r"Fail: [123456789]+\d?", lst[idx + 1]):
                rest = lst[idx:-1]
                break

    rej_strs = pickle.load(open(inputs_file, "rb"))
    basein = find_baseinput(original_file, rej_strs)
    inputs = []
    # Find the used base candidate
    for cand in rej_strs:
        inputs.append(str(cand[0]))

    errs = {}
    # Check whether the used valid string is actually valid
    exc_orig = execute_script_with_argument(original_file, basein)
    if exc_orig:
        raise SystemExit("Original script rejects baseinput: " + repr(basein))

    # Check all mutants for behaviour changes
    cnt = 1
    # Layout all detected behaviour linearly, each index triplet contains observed results of a mutated file
    mut_behaves = [0 for _ in range(3 * len(all_mutants))]

    future_to_index = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as tpe:
        for i in range(0, len(mut_behaves), 3):
            my_mutant = all_mutants[int(i / 3)]
            # Find the used input
            my_input = my_mutant[:my_mutant.rfind("_")]
            my_input = inputs[int(my_input[my_input.rfind("_") + 1:])]
            # Check whether the valid string is rejected
            future_to_index[tpe.submit(exec_threaded, my_mutant, basein)] = i
            # Check the output of the original script for the rejected string
            future_to_index[tpe.submit(exec_threaded, original_file,
                                       my_input)] = i + 1
            # Check the output of the mutated script for the rejected string
            future_to_index[tpe.submit(exec_threaded, my_mutant,
                                       my_input)] = i + 2

        fidx = 0
        for future in concurrent.futures.as_completed(future_to_index):
            index = future_to_index[future]
            mut_behaves[index] = future.result()
            if future.result() == "-1":
                run_seq.append(index)
            if fidx % 3 == 0:
                print("Checking mutant:",
                      str(1 + int(fidx / 3)) + "/" + str(len(all_mutants)),
                      flush=True)
            fidx += 1

    seq_idx = 0
    if run_seq:
        print("Checking:",
              len(run_seq),
              "scripts in sequential mode....",
              flush=True)
        for sq_idx in run_seq:
            print("Checking script:",
                  str(seq_idx + 1),
                  "/",
                  str(len(run_seq)),
                  flush=True)
            seq_idx += 1
            my_mutant = all_mutants[int(sq_idx / 3)]
            sq_command = sq_idx % 3
            if sq_command == 0:
                mut_behaves[sq_idx] = execute_script_with_argument(
                    my_mutant,
                    basein,
                    tmout=int(current_config["unittest_timeout"]))
            else:
                my_input = my_mutant[:my_mutant.rfind("_")]
                my_input = inputs[int(my_input[my_input.rfind("_") + 1:])]
                if sq_command == 1:
                    mut_behaves[sq_idx] = execute_script_with_argument(
                        original_file,
                        my_input,
                        tmout=int(current_config["unittest_timeout"]))
                else:
                    mut_behaves[sq_idx] = execute_script_with_argument(
                        my_mutant,
                        my_input,
                        tmout=int(current_config["unittest_timeout"]))

    bhindex = 0
    while bhindex < len(mut_behaves):
        my_mutant = all_mutants[int(bhindex / 3)]
        exc_mutant_valid = mut_behaves[bhindex]
        exc_orig_invalid = mut_behaves[bhindex + 1]
        exc_mutant = mut_behaves[bhindex + 2]
        # Organize the observed behaviour of the mutant
        if exc_mutant_valid and exc_mutant_valid != "-1":
            bh = behave.get(my_mutant) if behave.get(my_mutant) else []
            bh.append("valid string rejected")
            behave[my_mutant] = bh

        if not exc_mutant:
            bh = behave.get(my_mutant) if behave.get(my_mutant) else []
            bh.append("invalid string accepted")
            behave[my_mutant] = bh

        elif exc_orig_invalid != exc_mutant and exc_mutant != "-1" and exc_orig_invalid != "-1":
            bh = behave.get(my_mutant) if behave.get(my_mutant) else []
            bh.append("invalid string raises new exception")
            behave[my_mutant] = bh

        # Compare expected and actual behaviour
        for e in mutant_to_cause.get(my_mutant):
            if e == 0 and (not exc_mutant_valid or exc_mutant_valid == "-1"):
                er = errs.get(my_mutant) if errs.get(my_mutant) else []
                er.append("valid string not rejected")
                errs[my_mutant] = er
            elif e == 1 and ((exc_mutant and exc_mutant == "-1") or
                             (exc_orig_invalid and exc_orig_invalid == "-1") or
                             (exc_mutant and exc_orig_invalid
                              and exc_mutant == exc_orig_invalid)):
                er = errs.get(my_mutant) if errs.get(my_mutant) else []
                er.append("mutated string not accepted")
                errs[my_mutant] = er

        bhindex += 3

    print()
    if not errs:
        print("No problems found.")
    else:
        print("Found", len(errs), "potential problem(s):")
        print(errs)

    if clean_invalid and errs:
        print()
        print("Removing potentially invalid scripts...")
        if str(base_dir) == "/":
            clean_and_fix_log(errs, cause_file)
        else:
            clean_and_fix_log(errs, cause_file, sub_dir, script_base_name)

    print()
    # Assign mutant class to scripts
    mut_0 = []
    mut_1 = []
    mut_2 = []
    for mut in behave:
        for bhvr in behave[mut]:
            if bhvr.find("rejected") >= 0:
                mut_0.append(mut)
            elif bhvr.find("raises") >= 0:
                mut_1.append(mut)
            elif bhvr.find("accepted") >= 0:
                mut_2.append(mut)

    behave_file = (current_config["default_mut_dir"] + "/").replace(
        "//", "/") + scriptname + "_verified.log"
    if os.path.exists(behave_file):
        os.remove(behave_file)
    with open(behave_file, "w", encoding="UTF-8") as dest:
        if mut_0:
            mut_0 = sorted(mut_0, key=by_index)
            dest.write("Valid string rejected:\n")
            for m_0 in mut_0:
                dest.write(repr(m_0) + "\n")
            dest.write("\n")
        if mut_1:
            mut_1 = sorted(mut_1, key=by_index)
            dest.write("Invalid string raises new exception:\n")
            for m_1 in mut_1:
                dest.write(repr(m_1) + "\n")
            dest.write("\n")
        if mut_2:
            mut_2 = sorted(mut_2, key=by_index)
            dest.write("Invalid string accepted:\n")
            for m_2 in mut_2:
                dest.write(repr(m_2) + "\n")

    if qc > 0:
        if all_mutants:
            for test_mut in all_mutants:
                if not os.path.exists(test_mut):
                    idx = top_page.index(test_mut + ":")
                    # Delete the 3 lines belonging to this mutant
                    del top_page[idx]
                    del top_page[idx]
                    del top_page[idx]

            # Remove the - indicator line as there are no 0 fail mutants left
            if not top_page:
                rest = rest[2:]

            with open(test_log, "w", encoding="UTF-8") as dst:
                for ln in top_page + rest:
                    dst.write(ln + "\n")
Beispiel #28
0
    parser.add_argument('--max_epochs', type=int, default=None)
    parser.add_argument('--learning_rate', type=float, default=None)
    parser.add_argument('--clipnorm', type=float, default=None)
    parser.add_argument('--remove_all_head_tail_edges', action='store_true', default=None)
    parser.add_argument('--timer', action='store_true', default=None)
    parser.add_argument('--print_train', action='store_true', default=None)
    parser.add_argument('--print_train_metric', action='store_true', default=None)
    parser.add_argument('--print_train_freq', type=int, default=None)
    parser.add_argument('--eval_within_epoch', default=None)
    parser.add_argument('--eval_valid', action='store_true', default=None)
    parser.add_argument('--moving_mean_decay', type=float, default=None)
    parser.add_argument('--test_output_attention', action='store_true', default=None)
    parser.add_argument('--test_analyze_attention', action='store_true', default=None)
    args = parser.parse_args()

    default_parser = config.get_default_config(args.dataset)
    hparams = copy.deepcopy(default_parser.parse_args())
    for arg in vars(args):
        attr = getattr(args, arg)
        if attr is not None:
            setattr(hparams, arg, attr)
    print(hparams)

    if hparams.dataset == 'NELL995':
        nell995_cls = getattr(datasets, hparams.dataset)
        for ds in nell995_cls.datasets():
            print('nell > ' + ds.name)
            if hparams.test_output_attention:
                dir_name = '../output/NELL995_subgraph/' + ds.name
                hparams.dir_name = dir_name
                if os.path.exists(dir_name):
def main(argv):
    """Run the unit-test suites of every mutant script in a folder and log results.

    ``argv[1]`` names the directory that contains the mutant ``*.py`` scripts.
    Two log files are written next to that directory:

    * ``<dir>_test_results.log`` -- per-script pass/fail counts; scripts that
      fail no tests are listed first, followed by a separator and the failing
      scripts sorted by fail count.
    * ``<dir>_fail_dict.log`` -- ``repr`` of a dict mapping each mutant script
      to the names of the tests it failed (consumed later for minimal mutant
      set generation).

    Raises SystemExit if no folder is given on the command line.
    """
    global current_config
    current_config = get_default_config()
    # Specify the original name of the script or its path to check the results.
    if len(argv) < 2:
        raise SystemExit("Please specify the folder the scripts are in!")
    # Normalize the folder path: forward slashes, no trailing slash.
    argv[1] = argv[1].replace("\\", "/")
    argv[1] = argv[1][:-1] if argv[1].endswith("/") else argv[1]
    test_res_fl = argv[1] + "_test_results.log"
    mutant_fail_dict = argv[1] + "_fail_dict.log"
    lwriter = LogWriter(test_res_fl)
    scripts_f = []  # scripts failing at least one test (or whose run produced no result)
    scripts_p = []  # scripts passing all tests
    targets = []
    mutant_to_testfail = {}
    num_workers = int(current_config["test_threads"])

    # Collect the unique .py scripts to test (order of discovery is kept).
    for fnm in glob.iglob(argv[1] + "/*.py", recursive=True):
        fnm = fnm.replace("\\", "/")
        if fnm not in targets:
            targets.append(fnm)

    # Run each script's test suite on a worker thread pool.
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as tpe:
        future_to_script = {
            tpe.submit(run_tests_threaded, test_script): test_script
            for test_script in targets
        }
        fidx = 0
        for future in concurrent.futures.as_completed(future_to_script):
            print("Running tests:",
                  str(fidx + 1) + "/" + str(len(targets)),
                  flush=True)
            test_script = future_to_script[future]
            (testnum, failnames) = future.result()
            if failnames is not None:
                tpass = testnum - len(failnames)
                tfail = len(failnames)
                mutant_to_testfail[test_script] = failnames
            else:
                # No usable result from the test run; mark with -1/-1 so the
                # script lands in the failing section of the report.
                tpass = -1
                tfail = -1

            if tfail == 0:
                if tpass > 0:
                    scripts_p.append((test_script, (tpass, tfail)))
            else:
                scripts_f.append((test_script, (tpass, tfail)))
            lwriter.append_line(test_script + ":\nPass: " + str(tpass) +
                                ", Fail: " + str(tfail) + " \n" + "\n")
            fidx += 1

    # Write the test stats to a file. Mutants that fail no tests are at the top if they exist.
    with open(test_res_fl, "w", encoding="UTF-8") as dest:
        scripts_p = sorted(scripts_p, key=by_index)
        for (scrpt, (tpass, tfail)) in scripts_p:
            dest.write(scrpt + ":\nPass: " + str(tpass) + ", Fail: " +
                       str(tfail) + " \n")
            dest.write("\n")

        if scripts_p and scripts_f:
            # Visual separator between the all-pass block and the failing block.
            dest.write(
                "---------------------------------------------------------------------------------------------------\n"
            )
            dest.write("\n")

        # Sort by index first, then stably by fail count, so scripts with the
        # same fail count keep their index order.
        scripts_f = sorted(sorted(scripts_f, key=by_index), key=by_fail)

        for (scrpt, (tpass, tfail)) in scripts_f:
            dest.write(scrpt + ":\nPass: " + str(tpass) + ", Fail: " +
                       str(tfail) + " \n")
            dest.write("\n")

    # Log which tests the mutants failed to enable minimal mutant set generation
    with open(mutant_fail_dict, "w", encoding="UTF-8") as dest:
        dest.write(repr(mutant_to_testfail))