Example #1
def get_proof(sexp_cache, args):
    coq_filename = os.path.splitext(args.file)[0] + '.v'
    fields = coq_filename.split(os.path.sep)
    loc2code = get_code(open(coq_filename, 'rb').read())
    meta = open(args.file).read()
    coq_code = extract_code(meta, loc2code)
    file_data = json.load(open(os.path.join(args.data_path, args.file[13:-5] + '.json')))

    proof_data = None  # stays None if the target proof is never reached
    with SerAPI(args.timeout, args.debug) as serapi:
        num_extra_cmds = len(set_paths(meta, serapi, sexp_cache))
        
        # process the coq code
        proof_start_lines = []
        in_proof = False
        for num_executed, (code_line, tags) in enumerate(coq_code):
            assert code_line == file_data['vernac_cmds'][num_extra_cmds + num_executed][0]
            if 'PROOF_NAME' in tags and tags['PROOF_NAME'] == args.proof:  # the proof ends
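                # roll back to the checkpoint pushed when this proof started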
                serapi.pop()
                line_nb = proof_start_lines[-1]
                proof_data = record_proof(
                    num_extra_cmds, line_nb, coq_code[line_nb + 1:],
                    sexp_cache, serapi, args)
                break
            # execute the code
            if args.debug:
                print('%d: %s' % (num_executed, code_line))
            serapi.execute(code_line)
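            # open goals mean execution is currently inside a proof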
            if serapi.has_open_goals():
                if not in_proof:  # the proof starts
                    in_proof = True
                    proof_start_lines.append(num_executed)
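                    # save a checkpoint so the proof can be replayed later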
                    serapi.push()
            else:
                in_proof = False

    return proof_data
Example #2
def check_file(filename, sexp_cache, args):
    coq_filename = os.path.splitext(filename)[0] + '.v'
    fields = coq_filename.split(os.path.sep)
    loc2code = get_code(open(coq_filename, 'rb').read())
    file_data = {
        'filename': os.path.sep.join(fields[2:]),
        'coq_project': fields[1],
        'vernac_cmds': [],
        'num_extra_cmds': None,
        'proofs': [],
    }
    meta = open(filename).read()

    with SerAPI(args.timeout, args.debug) as serapi:
        # set the path
        file_data['vernac_cmds'].extend(set_paths(meta, serapi, sexp_cache))
        file_data['num_extra_cmds'] = len(file_data['vernac_cmds'])

        # extract the coq code
        coq_code = extract_code(meta, loc2code)

        # process the coq code
        for num_executed, (code_line, tags) in enumerate(coq_code):
            if 'PROOF_NAME' in tags:
                if tags['PROOF_NAME'] not in file_data['proofs']:
                    file_data['proofs'].append(tags['PROOF_NAME'])
            # execute the command and record it along with its serialized AST
            if args.debug:
                print('%d: %s' % (num_executed, code_line))
            _, ast = serapi.execute(code_line, return_ast=True)
            file_data['vernac_cmds'].append(
                (code_line, tags['VERNAC_TYPE'], sexp_cache.dump(ast)))

    return file_data
Example #3
def check_file(filename, sexp_cache, args):
    print("\n" + filename)
    coq_filename = os.path.splitext(filename)[0] + ".v"
    fields = coq_filename.split(os.path.sep)
    loc2code = get_code(open(coq_filename, "rb").read())
    file_data = {
        "filename": os.path.sep.join(fields[2:]),
        "coq_project": fields[1],
        "vernac_cmds": [],
        "num_extra_cmds": None,
        "proofs": [],
    }
    meta = open(filename).read()

    with SerAPI(args.timeout, args.debug) as serapi:
        # set the path
        file_data["vernac_cmds"].extend(set_paths(meta, serapi, sexp_cache))
        file_data["num_extra_cmds"] = len(file_data["vernac_cmds"])

        # extract the coq code
        coq_code = extract_code(meta, loc2code)

        # process the coq code
        for num_executed, (code_line, tags) in enumerate(coq_code):
            if "PROOF_NAME" in tags:
                if tags["PROOF_NAME"] not in file_data["proofs"]:
                    file_data["proofs"].append(tags["PROOF_NAME"])
            # execute the command and record it along with its serialized AST
            if args.debug:
                print("%d: %s" % (num_executed, code_line))
            _, ast = serapi.execute(code_line, return_ast=True)
            file_data["vernac_cmds"].append(
                (code_line, tags["VERNAC_TYPE"], sexp_cache.dump(ast)))

    return file_data
Example #4
def get_proof(args):

    coq_filename = os.path.splitext(args.file)[0] + '.v'
    fields = coq_filename.split(os.path.sep)
    loc2code = get_code(open(coq_filename, 'rb').read())
    meta = open(args.file).read()
    coq_code = extract_code(meta, loc2code)
    with open(os.path.join(args.data_path, args.file[13:-5] + '.json'),
              "r") as json_data:
        file_data = json.load(json_data)
    with open(args.file0, "r") as json_data:
        proof_data0 = json.load(json_data)
    proof_data0 = [tempdic['name'] for tempdic in proof_data0['proofs']]
    for tempproof in file_data['proofs']:
        args.proof = tempproof
        if tempproof not in proof_data0:
            continue
        # dirname is assumed to be defined at module level (output directory)
        if os.path.isfile(os.path.join(dirname, args.proof + '.json')):
            continue  # skip proofs that have already been extracted
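        # each proof gets its own sexp cache database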
        db_path = os.path.join(dirname, args.proof + '-sexp_cache')
        sexp_cache = SexpCache(db_path)

        with SerAPI(args.timeout, args.debug) as serapi:
            num_extra_cmds = len(set_paths(meta, serapi, sexp_cache))

            # process the coq code
            proof_start_lines = []
            in_proof = False
            for num_executed, (code_line, tags) in enumerate(coq_code):
                assert code_line == \
                    file_data['vernac_cmds'][num_extra_cmds + num_executed][0]
                # the proof ends
                if 'PROOF_NAME' in tags and tags['PROOF_NAME'] == args.proof:
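                    # roll back to the checkpoint pushed when the proof started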
                    serapi.pop()
                    line_nb = proof_start_lines[-1]

                    proof_data = record_proof(num_extra_cmds, line_nb,
                                              coq_code[line_nb + 1:],
                                              sexp_cache, serapi, args)
                    if proof_data is not None:
                        dump(proof_data, args)
                    break
                # execute the code
                if args.debug:
                    print('%d: %s' % (num_executed, code_line))
                serapi.execute(code_line)
                if serapi.has_open_goals():
                    if not in_proof:  # the proof starts
                        in_proof = True
                        proof_start_lines.append(num_executed)
                        serapi.push()
                else:
                    in_proof = False

    return
Example #5
def start(record_name: str = settings.record_name, mode: str = settings.mode):
    settings.mode = mode
    settings.record_name = record_name

    set_mode()
    utils.set_paths()

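    # is_record / is_playback / is_proxy are assumed to be module-level flags
    # derived from settings.mode (e.g., set by set_mode())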
    if is_record:
        print_welcome("RECORDING")
        global recorder
        recorder = Recorder()
        recorder.prepare()
    elif is_playback:
        print_welcome("PLAYBACK")
        global player
        player = Player(accept_socket)
        player.prepare()
    elif is_proxy:
        print_welcome("PROXY")
Example #6
def eval_vae_lr():
    config = get_config()

    # set seed
    SEED = config['seed']
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    random.seed(SEED)

    experiment_dir = config['experiment_dir']
    experiment_path = Path(os.getcwd()) / f'data/vae_model/{experiment_dir}'
    flags_path = experiment_path / 'flags.rar'
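    # restore the flags (hyperparameters) saved by the training run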
    FLAGS = torch.load(flags_path)
    FLAGS.save_figure = True
    FLAGS.dir_cond_gen = Path(__file__).parent.parent / 'data/cond_gen'
    FLAGS.text_gen_lastlayer = 'softmax'

    FLAGS = set_paths(FLAGS, config)
    FLAGS.use_clf = False
    FLAGS.batch_size = 30
    # FLAGS.undersample_dataset = True
    state_dict_path = experiment_path / 'checkpoints/0149/mm_vae'

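    # rebuild the experiment and load the trained multimodal VAE weights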
    mimic_experiment = MimicExperiment(flags=FLAGS)
    mimic_experiment.tb_logger = Dummylogger()
    mimic_experiment.mm_vae.to(FLAGS.device)
    mimic_experiment.mm_vae.load_state_dict(
        state_dict=torch.load(state_dict_path))
    mimic_experiment.mm_vae.eval()

    results = {}
    for binary_labels in [True]:
        mimic_experiment.flags.binary_labels = binary_labels
        with torch.no_grad():
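            # train and evaluate classifiers on the latent representations
            # of each modality subset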
            clf_lr = train_clf_lr_all_subsets(mimic_experiment,
                                              weighted_sampler=False)
            predictions, gt = test_clf_lr_all_subsets(clf_lr, mimic_experiment)
            for subset in predictions:
                # calculate metrics
                metrics = Metrics(predictions[subset],
                                  gt,
                                  str_labels=get_labels(FLAGS.binary_labels))
                metrics_dict = metrics.evaluate()
                results[subset] = metrics_dict
                print(subset, ':', metrics_dict[config['eval_metric']][0])

    log.info(f'Lr eval results: {results}')

    out_path = Path(os.getcwd()) / (
        f'data/lr_eval_results{"_bin_label" if binary_labels else ""}.json')
    log.info(f'Saving lr eval test results to {out_path}')

    with open(out_path, 'w') as outfile:
        json.dump(results, outfile)