# Example #1
# 0
        stdout, stderr = pro.communicate()
        returncode = pro.returncode

    # 根据执行结果判断是否通过
    return judge_pass(returncode, stdout, stderr)


if __name__ == '__main__':
    # Run the environment sanity check before fuzzing starts.
    from Fuzzer.environment_check import check
    check()

    # Derive the workspace directory from the generator-model path and
    # restore the token vocabularies saved during training.
    print("正在恢复词汇表和模型,请稍等...")
    workspace_path = os.path.dirname(hparams.gen_model)
    token_to_idx = load_json(os.path.join(workspace_path, 'token_to_idx.json'))
    # transfer=True presumably converts JSON string keys back to ints for the
    # reverse lookup table — TODO confirm against load_json's implementation.
    idx_to_token = load_json(os.path.join(workspace_path, 'idx_to_token.json'),
                             transfer=True)

    # Restore the fully serialized model. torch.load has no device parameter,
    # so map_location plus .to(device) are used to place it.
    # NOTE(review): map_location hard-codes a CUDA device — this will fail on
    # CPU-only hosts even though `device` falls back to CPU; confirm intended.
    model = torch.load(hparams.gen_model,
                       map_location=f'cuda:{hparams.gpu}').to(device)
    model.device = device

    # Counters and result buckets for the fuzzing loop (continues below
    # this visible chunk).
    total_count = 0
    pass_count = 0
    no_pass_count = 0
    # Path to the ChakraCore JS engine binary used to execute test cases.
    engine = '/root/.jsvu/ChakraCoreFiles/bin/ch'
    pass_testcases = []
    no_pass_testcases = []
# Example #2
# 0
from CodeGenerator.model import LSTM

# Select the CUDA device configured in hparams when a GPU is available,
# otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device(f"cuda:{hparams.gpu}")
else:
    device = torch.device("cpu")


def write_file(file_path, content: str):
    """Append *content* to the UTF-8 text file at *file_path*.

    The file is created if it does not exist. Uses mode 'a' rather than
    the original 'a+': the function never reads, so requesting read
    access was unnecessary.
    """
    with open(file_path, 'a', encoding='utf-8') as f:
        f.write(content)


if __name__ == '__main__':
    # Derive the workspace directory from the generator-model path and
    # restore the character vocabularies saved during training.
    print("正在恢复词汇表和模型,请稍等...")
    workspace_path = os.path.dirname(hparams.gen_model)
    char_to_idx = load_json(os.path.join(workspace_path, 'char_to_idx.json'))
    # transfer=True presumably converts JSON string keys back to ints for the
    # reverse lookup table — TODO confirm against load_json's implementation.
    idx_to_char = load_json(os.path.join(workspace_path, 'idx_to_char.json'),
                            transfer=True)

    # Restore the fully serialized model. torch.load has no device parameter;
    # the model is moved with .to(device) instead.
    # NOTE(review): no map_location is given here — loading a GPU-saved
    # checkpoint on a CPU-only host will fail; the sibling script passes
    # map_location explicitly. Verify which behavior is intended.
    model = torch.load(hparams.gen_model).to(device)
    model.device = device

    # Counters for the generation/filtering pipeline (continues below
    # this visible chunk).
    seed_count = 0
    total_count = 0
    new_seed_count = 0
    syntax_correct_count = 0
    semantic_correct_count = 0
    filtered_case_count = 0