Example #1
def test(model, ema, args, data):
    device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss()
    loss = 0
    answers = dict()
    model.eval()

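    # Evaluate with EMA-averaged weights: back up the live parameters and copy in the EMA shadow values.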
    backup_params = EMA(0)
    for name, param in model.named_parameters():
        if param.requires_grad:
            backup_params.register(name, param.data)
            param.data.copy_(ema.get(name))


    for batch in iter(data.dev_iter):
        with torch.no_grad():
            p1, p2 = model(batch.c_char, batch.q_char, batch.c_word[0], batch.q_word[0],
                           batch.c_word[1], batch.q_word[1])
        batch_loss = criterion(p1, batch.s_idx) + criterion(p2, batch.e_idx)
        loss += batch_loss.item()

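        # Pick the most likely answer span: add start/end log-probabilities and mask out spans where end < start.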
        # (batch, c_len, c_len)
        batch_size, c_len = p1.size()
        ls = nn.LogSoftmax(dim=1)
        mask = (torch.ones(c_len, c_len) * float('-inf')).to(device).tril(-1).unsqueeze(0).expand(batch_size, -1, -1)
        score = (ls(p1).unsqueeze(2) + ls(p2).unsqueeze(1)) + mask
        score, s_idx = score.max(dim=1)
        score, e_idx = score.max(dim=1)
        s_idx = torch.gather(s_idx, 1, e_idx.view(-1, 1)).squeeze()

        for i in range(batch_size):
            qid = batch.id[i]
            answer = batch.c_word[0][i][s_idx[i]:e_idx[i] + 1]
            answer = ' '.join([data.CONTEXT_WORD.vocab.itos[idx] for idx in answer])
            if answer == "<eos>":
                answer = ""
            answers[qid] = answer

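    # Restore the original (pre-EMA) parameters after evaluation.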
    for name, param in model.named_parameters():
        if param.requires_grad:
            param.data.copy_(backup_params.get(name))

    with open(args.prediction_file, 'w', encoding='utf-8') as f:
        print(json.dumps(answers), file=f)

    opts = evaluate.parse_args(args=[f"{args.dataset_file}", f"{args.prediction_file}"])
    results = evaluate.main(opts)
    return loss, results['exact'], results['f1'], results['HasAns_exact'], results['HasAns_f1'], results['NoAns_exact'], results['NoAns_f1']
Example #2
def test(model, ema, args, data):
    device = torch.device(
        f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss()
    loss = 0
    answers = dict()
    model.eval()

    backup_params = EMA(0)
    for name, param in model.named_parameters():
        if param.requires_grad:
            backup_params.register(name, param.data)
            param.data.copy_(ema.get(name))

    with torch.set_grad_enabled(False):
        for batch in iter(data.dev_iter):
            p1, p2 = model(batch)
            batch_loss = criterion(p1, batch.s_idx) + criterion(
                p2, batch.e_idx)
            loss += batch_loss.item()

            # (batch, c_len, c_len)
            batch_size, c_len = p1.size()
            ls = nn.LogSoftmax(dim=1)
            mask = (torch.ones(c_len, c_len) *
                    float('-inf')).to(device).tril(-1).unsqueeze(0).expand(
                        batch_size, -1, -1)
            score = (ls(p1).unsqueeze(2) + ls(p2).unsqueeze(1)) + mask
            score, s_idx = score.max(dim=1)
            score, e_idx = score.max(dim=1)
            s_idx = torch.gather(s_idx, 1, e_idx.view(-1, 1)).squeeze()

            for i in range(batch_size):
                qid = batch.id[i]
                answer = batch.c_word[0][i][s_idx[i]:e_idx[i] + 1]
                answer = ' '.join(
                    [data.WORD.vocab.itos[idx] for idx in answer])
                answers[qid] = answer

        for name, param in model.named_parameters():
            if param.requires_grad:
                param.data.copy_(backup_params.get(name))

    with open(args.prediction_file, 'w', encoding='utf-8') as f:
        print(json.dumps(answers, indent=4), file=f)

    results = evaluate.main(args, answers, data)
    return loss / len(data.dev_iter), results['exact_match'], results['f1']
Example #3
def cw_tree_attack_targeted():
    cw = CarliniL2_qa(debug=args.debugging)
    criterion = nn.CrossEntropyLoss()
    loss = 0
    tot = 0
    adv_loss = 0
    targeted_success = 0
    untargeted_success = 0
    adv_text = []
    answers = dict()
    adv_answers = dict()
    # model.eval()

    embed = torch.load(args.word_vector)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    vocab = Vocab(filename=args.dictionary,
                  data=[PAD_WORD, UNK_WORD, EOS_WORD, SOS_WORD])
    generator = Generator(args.test_data, vocab=vocab, embed=embed)
    transfered_embedding = torch.load('bidaf_transfered_embedding.pth')
    transfer_emb = torch.nn.Embedding.from_pretrained(transfered_embedding).to(
        device)
    seqback = WrappedSeqback(embed,
                             device,
                             attack=True,
                             seqback_model=generator.seqback_model,
                             vocab=vocab,
                             transfer_emb=transfer_emb)
    treelstm = generator.tree_model
    generator.load_state_dict(torch.load(args.load_ae))

    backup_params = EMA(0)
    for name, param in model.named_parameters():
        if param.requires_grad:
            backup_params.register(name, param.data)
            param.data.copy_(ema.get(name))

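    # Wrapper module: decode the (perturbed) tree-LSTM hidden state back into word embeddings via seqback,
    # then run the QA model on those embeddings, so the attack can optimize in the latent space.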
    class TreeModel(nn.Module):
        def __init__(self):
            super(TreeModel, self).__init__()
            self.inputs = None

        def forward(self, hidden):
            self.embedding = seqback(hidden)
            return model(batch, perturbed=self.embedding)

        def set_temp(self, temp):
            seqback.temp = temp

        def get_embedding(self):
            return self.embedding

        def get_seqback(self):
            return seqback

    tree_model = TreeModel()
    for batch in tqdm(iter(data.dev_iter), total=1000):
        p1, p2 = model(batch)
        orig_answer, orig_s_idx, orig_e_idx = write_to_ans(
            p1, p2, batch, answers)
        batch_loss = criterion(p1, batch.s_idx) + criterion(p2, batch.e_idx)
        loss += batch_loss.item()

        append_info = append_input(batch, vocab)
        batch_add_start = append_info['add_start']
        batch_add_end = append_info['add_end']
        batch_start_target = torch.LongTensor(
            append_info['target_start']).to(device)
        batch_end_target = torch.LongTensor(
            append_info['target_end']).to(device)
        add_sents = append_info['append_sent']

        input_embedding = model.word_emb(batch.c_word[0])
        append_info['tree'] = [generator.get_tree(append_info['tree'])]
        seqback.sentences = input_embedding.clone().detach()
        seqback.batch_trees = append_info['tree']
        seqback.batch_add_sent = append_info['ae_sent']
        seqback.start = append_info['add_start']
        seqback.end = append_info['add_end']
        seqback.adv_sent = []

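        # Encode each appended sentence with the tree-LSTM; the resulting hidden states are what the attack perturbs.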
        batch_tree_embedding = []
        for bi, append_sent in enumerate(append_info['ae_sent']):
            seqback.target_start = (append_info['target_start'][0] -
                                    append_info['add_start'][0])
            seqback.target_end = (append_info['target_end'][0] -
                                  append_info['add_start'][0])
            sentences = [
                torch.tensor(append_sent, dtype=torch.long, device=device)
            ]
            seqback.target = sentences[0][
                seqback.target_start:seqback.target_end + 1]
            trees = [append_info['tree'][bi]]
            tree_embedding = treelstm(sentences, trees)[0][0].detach()
            batch_tree_embedding.append(tree_embedding)
        hidden = torch.cat(batch_tree_embedding, dim=0)
        cw.batch_info = append_info
        cw.num_classes = append_info['tot_length']

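        # Run the C&W-style attack on the tree embedding, pushing the model toward the appended target span.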
        adv_hidden = cw.run(tree_model,
                            hidden, (batch_start_target, batch_end_target),
                            input_token=input_embedding)
        seqback.adv_sent = []

        # Re-test: splice the best adversarial tokens back into the context and re-run the model.
        for bi, (add_start,
                 add_end) in enumerate(zip(batch_add_start, batch_add_end)):
            if bi in cw.o_best_sent:
                ae_words = cw.o_best_sent[bi]
                bidaf_tokens = bidaf_convert_to_idx(ae_words)
                batch.c_word[0].data[bi, add_start:add_end] = torch.LongTensor(
                    bidaf_tokens)
        p1, p2 = model(batch)
        adv_answer, adv_s_idx, adv_e_idx = write_to_ans(
            p1, p2, batch, adv_answers)
        batch_loss = criterion(p1, batch.s_idx) + criterion(p2, batch.e_idx)
        adv_loss += batch_loss.item()

        for bi, (start_target, end_target) in enumerate(
                zip(batch_start_target, batch_end_target)):
            start_output = adv_s_idx
            end_output = adv_e_idx
            targeted_success += int(
                compare(start_output, start_target.item(), end_output,
                        end_target.item()))
            untargeted_success += int(
                compare_untargeted(start_output, start_target.item(),
                                   end_output, end_target.item()))

        for i in range(len(add_sents)):
            logger.info(("orig:", transform(add_sents[i])))
            try:
                logger.info(("adv:", cw.o_best_sent[i]))
                adv_text.append({
                    'adv_text': cw.o_best_sent[i],
                    'qas_id': batch.id[i],
                    'adv_predict': (adv_s_idx, adv_e_idx),
                    'orig_predict': (orig_s_idx, orig_e_idx),
                    'Orig answer:': orig_answer,
                    'Adv answer:': adv_answer
                })
            except KeyError:
                # No adversarial sentence was found for this example; keep the original appended sentence.
                adv_text.append({
                    'adv_text': transform(add_sents[i]),
                    'qas_id': batch.id[i],
                    'adv_predict': (adv_s_idx, adv_e_idx),
                    'orig_predict': (orig_s_idx, orig_e_idx),
                    'Orig answer:': orig_answer,
                    'Adv answer:': adv_answer
                })
            joblib.dump(adv_text, root_dir + '/adv_text.pkl')
        # for batch size = 1
        tot += 1
        logger.info(("orig predict", (orig_s_idx, orig_e_idx)))
        logger.info(("adv append predict", (adv_s_idx, adv_e_idx)))
        logger.info(("targeted success count:", targeted_success))
        logger.info(("untargeted success count:", untargeted_success))
        logger.info(("Orig answer:", orig_answer))
        logger.info(("Adv answer:", adv_answer))
        logger.info(("tot:", tot))

    for name, param in model.named_parameters():
        if param.requires_grad:
            param.data.copy_(backup_params.get(name))

    with open(options.prediction_file, 'w', encoding='utf-8') as f:
        print(json.dumps(answers), file=f)
    with open(options.prediction_file + '_adv.json', 'w',
              encoding='utf-8') as f:
        print(json.dumps(adv_answers), file=f)
    results = evaluate.main(options)
    logger.info(tot)
    logger.info(("adv loss, results['exact_match'], results['f1']", loss,
                 results['exact_match'], results['f1']))
    return loss, results['exact_match'], results['f1']
Example #4
def cw_random_word_attack():
    cw = CarliniL2_untargeted_qa(debug=args.debugging)
    criterion = nn.CrossEntropyLoss()
    loss = 0
    adv_loss = 0
    targeted_success = 0
    untargeted_success = 0
    adv_text = []
    answers = dict()
    adv_answers = dict()

    backup_params = EMA(0)
    for name, param in model.named_parameters():
        if param.requires_grad:
            backup_params.register(name, param.data)
            param.data.copy_(ema.get(name))
    tot = 0
    for batch in tqdm(iter(data.dev_iter), total=1000):
        p1, p2 = model(batch)
        orig_answer, orig_s_idx, orig_e_idx = write_to_ans(
            p1, p2, batch, answers)
        batch_loss = criterion(p1, batch.s_idx) + criterion(p2, batch.e_idx)
        loss += batch_loss.item()

        append_info = append_random_input(batch)
        allow_idxs = append_info['allow_idx']
        batch_start_target = torch.LongTensor([0]).to(device)
        batch_end_target = torch.LongTensor([0]).to(device)

        input_embedding = model.word_emb(batch.c_word[0])
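        # 0/1 mask restricting the C&W perturbation to the randomly inserted word positions only.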
        cw_mask = np.zeros(input_embedding.shape).astype(np.float32)
        cw_mask = torch.from_numpy(cw_mask).float().to(device)

        for bi, allow_idx in enumerate(allow_idxs):
            cw_mask[bi, np.array(allow_idx)] = 1
        cw.wv = model.word_emb.weight
        cw.inputs = batch
        cw.mask = cw_mask
        cw.batch_info = append_info
        cw.num_classes = append_info['tot_length']
        # print(transform(to_list(batch.c_word[0][0])))
        cw.run(model, input_embedding, (batch_start_target, batch_end_target))

        # re-test
        for bi, allow_idx in enumerate(allow_idxs):
            if bi in cw.o_best_sent:
                for i, idx in enumerate(allow_idx):
                    batch.c_word[0].data[bi, idx] = cw.o_best_sent[bi][i]
        p1, p2 = model(batch)
        adv_answer, adv_s_idx, adv_e_idx = write_to_ans(
            p1, p2, batch, adv_answers)
        batch_loss = criterion(p1, batch.s_idx) + criterion(p2, batch.e_idx)
        adv_loss += batch_loss.item()

        for bi, (start_target, end_target) in enumerate(
                zip(batch_start_target, batch_end_target)):
            start_output = adv_s_idx
            end_output = adv_e_idx
            targeted_success += int(
                compare(start_output, start_target.item(), end_output,
                        end_target.item()))
            untargeted_success += int(
                compare_untargeted(start_output, start_target.item(),
                                   end_output, end_target.item()))
        for i in range(len(allow_idxs)):
            try:
                logger.info(("adv:", transform(cw.o_best_sent[i])))
                adv_text.append({
                    'added_text': transform(cw.o_best_sent[i]),
                    'adv_text': transform(to_list(batch.c_word[0][0])),
                    'qas_id': batch.id[i],
                    'adv_predict': (adv_s_idx, adv_e_idx),
                    'orig_predict': (orig_s_idx, orig_e_idx),
                    'Orig answer:': orig_answer,
                    'Adv answer:': adv_answer
                })
            except KeyError:
                # No adversarial substitution was found for this example; record the unmodified text.
                adv_text.append({
                    'adv_text': transform(to_list(batch.c_word[0][0])),
                    'qas_id': batch.id[i],
                    'adv_predict': (adv_s_idx, adv_e_idx),
                    'orig_predict': (orig_s_idx, orig_e_idx),
                    'Orig answer:': orig_answer,
                    'Adv answer:': adv_answer
                })
            joblib.dump(adv_text, root_dir + '/adv_text.pkl')
        # for batch size = 1
        tot += 1
        logger.info(("orig predict", (orig_s_idx, orig_e_idx)))
        logger.info(("adv append predict", (adv_s_idx, adv_e_idx)))
        logger.info(("targeted success count:", targeted_success))
        logger.info(("untargeted success count:", untargeted_success))
        logger.info(("Orig answer:", orig_answer))
        logger.info(("Adv answer:", adv_answer))
        logger.info(("tot:", tot))

    for name, param in model.named_parameters():
        if param.requires_grad:
            param.data.copy_(backup_params.get(name))

    with open(options.prediction_file, 'w', encoding='utf-8') as f:
        print(json.dumps(answers), file=f)
    with open(options.prediction_file + '_adv.json', 'w',
              encoding='utf-8') as f:
        print(json.dumps(adv_answers), file=f)
    results = evaluate.main(options)
    logger.info(tot)
    logger.info(("adv loss, results['exact_match'], results['f1']", loss,
                 results['exact_match'], results['f1']))
    return loss, results['exact_match'], results['f1']