def Checklist2020(model):
    """An implementation of the attack used in "Beyond Accuracy: Behavioral
    Testing of NLP models with CheckList", Ribeiro et al., 2020.

    This attack applies several of the perturbations used in CheckList's
    Invariance Testing method:
        - Contraction
        - Extension
        - Changing names, numbers, and locations
        - possibly negation (not yet implemented)

    The idea is to alter elements of the sentence without changing its semantics.

    https://arxiv.org/abs/2005.04118

    :param model: Model to attack.
    """
    transformation = CompositeTransformation(
        [
            # WordSwapExtend(),
            WordSwapContract(),
            WordSwapChangeName(),
            # WordSwapChangeNumber(),
            WordSwapChangeLocation(),
        ]
    )

    # Need this constraint to prevent extend and contract from modifying each
    # other's changes and forming an infinite loop.
    constraints = [RepeatModification()]

    # Untargeted attack & greedy search.
    goal_function = UntargetedClassification(model)
    search_method = GreedySearch()

    return Attack(goal_function, constraints, transformation, search_method)
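# --- Usage sketch (illustrative, not part of the recipe) ---------------------
# A minimal example of building and running the CheckList attack above,
# assuming the TextAttack API used elsewhere in this file
# (HuggingFaceModelWrapper, Attack.attack_dataset). The checkpoint name and the
# (text, label) pair are placeholders, not values prescribed by the paper.
import transformers
from textattack.models.wrappers import HuggingFaceModelWrapper

hf_model = transformers.AutoModelForSequenceClassification.from_pretrained(
    "textattack/bert-base-uncased-imdb"  # placeholder victim model
)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(
    "textattack/bert-base-uncased-imdb"
)
model_wrapper = HuggingFaceModelWrapper(hf_model, hf_tokenizer)

checklist_attack = Checklist2020(model_wrapper)
result = next(checklist_attack.attack_dataset([("I visited New York last Friday.", 1)]))
print(result)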
def build(model, max_num_word_swaps=1):
    # A combination of four different character-based transforms.
    # Ignore the first and last letter of each word, as in the paper.
    transformation = CompositeTransformation(
        [
            WordSwapNeighboringCharacterSwap(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapRandomCharacterDeletion(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapRandomCharacterInsertion(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapQWERTY(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
        ]
    )
    # Only edit words of length >= 4, and edit at most max_num_word_swaps words.
    # Since the same word is never edited twice, max_num_word_swaps is really
    # the maximum number of character changes that can be made. The paper
    # looks at 1- and 2-character attacks.
    constraints = [
        MinWordLength(min_length=4),
        StopwordModification(),
        MaxWordsPerturbed(max_num_words=max_num_word_swaps),
        RepeatModification(),
    ]
    # Untargeted attack.
    goal_function = UntargetedClassification(model)
    search_method = GreedySearch()
    return Attack(goal_function, constraints, transformation, search_method)
def MorpheusTan2020(model):
    """Samson Tan, Shafiq Joty, Min-Yen Kan, Richard Socher.

    It's Morphin' Time! Combating Linguistic Discrimination with Inflectional
    Perturbations

    https://www.aclweb.org/anthology/2020.acl-main.263/
    """
    #
    # Goal is to minimize the BLEU score between the model output for the
    # perturbed input sequence and the reference translation.
    #
    goal_function = MinimizeBleu(model)

    # Swap words with their inflections.
    transformation = WordSwapInflections()

    #
    # Don't modify the same word twice or stopwords.
    #
    constraints = [RepeatModification(), StopwordModification()]

    #
    # Greedily swap words (see pseudocode, Algorithm 1 of the paper).
    #
    search_method = GreedySearch()

    return Attack(goal_function, constraints, transformation, search_method)
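# --- Usage sketch (illustrative, not part of the recipe) ---------------------
# A hedged sketch of running the Morpheus attack. MinimizeBleu expects a
# wrapped sequence-to-sequence (e.g. translation) model, and each dataset item
# pairs a source sentence with its reference translation. The name
# `translation_model_wrapper` below is a placeholder for whatever
# TextAttack-compatible seq2seq wrapper the surrounding project provides, and
# the example sentences are illustrative only.
#
# attack = MorpheusTan2020(translation_model_wrapper)
# results = attack.attack_dataset(
#     [("The quick brown fox jumps over the lazy dog.",
#       "Der schnelle braune Fuchs springt über den faulen Hund.")]
# )
# print(next(results))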
def Kuleshov2017(model):
    """Kuleshov, V. et al.

    Generating Natural Language Adversarial Examples.

    https://openreview.net/pdf?id=r1QZ3zbAZ
    """
    #
    # "Specifically, in all experiments, we used a target of τ = 0.7, a
    # neighborhood size of N = 15, and parameters λ_1 = 0.2 and δ = 0.5; we set
    # the syntactic bound to λ_2 = 2 nats for sentiment analysis."
    #
    # Word swap with the top-15 counter-fitted embedding neighbors.
    #
    transformation = WordSwapEmbedding(max_candidates=15)
    #
    # Don't modify the same word twice or stopwords.
    #
    constraints = [RepeatModification(), StopwordModification()]
    #
    # Maximum of 50% of words perturbed (δ in the paper).
    #
    constraints.append(MaxWordsPerturbed(max_percent=0.5))
    #
    # Maximum thought-vector Euclidean distance of λ_1 = 0.2 (eq. 4).
    #
    constraints.append(
        ThoughtVector(
            embedding_type="paragramcf", threshold=0.2, metric="max_euclidean"
        )
    )
    #
    # Maximum language model log-probability difference of λ_2 = 2 (eq. 5).
    #
    constraints.append(GPT2(max_log_prob_diff=2.0))
    #
    # Goal is untargeted classification: reduce the original label's
    # probability score to below τ = 0.7 (Algorithm 1).
    #
    goal_function = UntargetedClassification(model, target_max_score=0.7)
    #
    # Perform word substitutions with greedy search.
    #
    search_method = GreedySearch()

    return Attack(goal_function, constraints, transformation, search_method)
def Pruthi2019(model, max_num_word_swaps=1):
    """An implementation of the attack used in "Combating Adversarial
    Misspellings with Robust Word Recognition", Pruthi et al., 2019.

    This attack focuses on a small number of character-level changes that
    simulate common typos. It combines:
        - Swapping neighboring characters
        - Deleting characters
        - Inserting characters
        - Swapping characters for adjacent keys on a QWERTY keyboard

    https://arxiv.org/abs/1905.11268

    :param model: Model to attack.
    :param max_num_word_swaps: Maximum number of modifications to allow.
    """
    # A combination of four different character-based transforms.
    # Ignore the first and last letter of each word, as in the paper.
    transformation = CompositeTransformation(
        [
            WordSwapNeighboringCharacterSwap(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapRandomCharacterDeletion(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapRandomCharacterInsertion(
                random_one=False, skip_first_char=True, skip_last_char=True
            ),
            WordSwapQWERTY(random_one=False, skip_first_char=True, skip_last_char=True),
        ]
    )
    # Only edit words of length >= 4, and edit at most max_num_word_swaps words.
    # Since the same word is never edited twice, max_num_word_swaps is really
    # the maximum number of character changes that can be made. The paper
    # looks at 1- and 2-character attacks.
    constraints = [
        MinWordLength(min_length=4),
        StopwordModification(),
        MaxWordsPerturbed(max_num_words=max_num_word_swaps),
        RepeatModification(),
    ]
    # Untargeted attack.
    goal_function = UntargetedClassification(model)
    search_method = GreedySearch()
    return Attack(goal_function, constraints, transformation, search_method)
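# --- Usage sketch (illustrative, not part of the recipe) ---------------------
# The paper evaluates 1- and 2-character attacks, which correspond to
# max_num_word_swaps=1 and max_num_word_swaps=2 here. `model_wrapper` is
# assumed to be any TextAttack-compatible classification wrapper, e.g. the one
# built in the sketch after Checklist2020 above; the sample (text, label) pair
# is a placeholder.
#
# one_char_attack = Pruthi2019(model_wrapper, max_num_word_swaps=1)
# two_char_attack = Pruthi2019(model_wrapper, max_num_word_swaps=2)
# print(next(two_char_attack.attack_dataset([("A genuinely great film.", 1)])))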
def build(model):
    transformation = CompositeTransformation(
        [
            WordSwapExtend(),
            WordSwapContract(),
            WordSwapChangeName(),
            WordSwapChangeNumber(),
            WordSwapChangeLocation(),
        ]
    )

    # Need this constraint to prevent extend and contract from modifying each
    # other's changes and forming an infinite loop.
    constraints = [RepeatModification()]

    # Untargeted attack & greedy search.
    goal_function = UntargetedClassification(model)
    search_method = GreedySearch()

    return Attack(goal_function, constraints, transformation, search_method)
def build(model):
    #
    # Goal is to minimize the BLEU score between the model output for the
    # perturbed input sequence and the reference translation.
    #
    goal_function = MinimizeBleu(model)

    # Swap words with their inflections.
    transformation = WordSwapInflections()

    #
    # Don't modify the same word twice or stopwords.
    #
    constraints = [RepeatModification(), StopwordModification()]

    #
    # Greedily swap words (see pseudocode, Algorithm 1 of the paper).
    #
    search_method = GreedySearch()

    return Attack(goal_function, constraints, transformation, search_method)
def build(model): # "This paper presents CLARE, a ContextuaLized AdversaRial Example generation model # that produces fluent and grammatical outputs through a mask-then-infill procedure. # CLARE builds on a pre-trained masked language model and modifies the inputs in a context-aware manner. # We propose three contex-tualized perturbations, Replace, Insert and Merge, allowing for generating outputs of # varied lengths." # # "We experiment with a distilled version of RoBERTa (RoBERTa_{distill}; Sanh et al., 2019) # as the masked language model for contextualized infilling." # Because BAE and CLARE both use similar replacement papers, we use BAE's replacement method here. shared_masked_lm = transformers.AutoModelForCausalLM.from_pretrained( "distilroberta-base") shared_tokenizer = transformers.AutoTokenizer.from_pretrained( "distilroberta-base") transformation = CompositeTransformation([ WordSwapMaskedLM( method="bae", masked_language_model=shared_masked_lm, tokenizer=shared_tokenizer, max_candidates=50, min_confidence=5e-4, ), WordInsertionMaskedLM( masked_language_model=shared_masked_lm, tokenizer=shared_tokenizer, max_candidates=50, min_confidence=0.0, ), WordMergeMaskedLM( masked_language_model=shared_masked_lm, tokenizer=shared_tokenizer, max_candidates=50, min_confidence=5e-3, ), ]) # # Don't modify the same word twice or stopwords. # constraints = [RepeatModification(), StopwordModification()] # "A common choice of sim(·,·) is to encode sentences using neural networks, # and calculate their cosine similarity in the embedding space (Jin et al., 2020)." # The original implementation uses similarity of 0.7. use_constraint = UniversalSentenceEncoder( threshold=0.7, metric="cosine", compare_against_original=True, window_size=15, skip_text_shorter_than_window=True, ) constraints.append(use_constraint) # Goal is untargeted classification. # "The score is then the negative probability of predicting the gold label from f, using [x_{adv}] as the input" goal_function = UntargetedClassification(model) # "To achieve this, we iteratively apply the actions, # and first select those minimizing the probability of outputting the gold label y from f." # # "Only one of the three actions can be applied at each position, and we select the one with the highest score." # # "Actions are iteratively applied to the input, until an adversarial example is found or a limit of actions T # is reached. # Each step selects the highest-scoring action from the remaining ones." # search_method = GreedySearch() return Attack(goal_function, constraints, transformation, search_method)
def attack_from_queue(args, in_queue, out_queue):
    gpu_id = torch.multiprocessing.current_process()._identity[0] - 2
    set_env_variables(gpu_id)

    # Victim model: a Chinese RoBERTa-wwm-ext classifier fine-tuned on OCNLI,
    # paired with the MacBERT tokenizer.
    config = BertConfig.from_pretrained("hfl/chinese-macbert-base")
    config.output_attentions = False
    config.output_token_type_ids = False
    # config.max_length = 30
    tokenizer = BertTokenizer.from_pretrained("hfl/chinese-macbert-base", config=config)
    config = AutoConfig.from_pretrained(
        "./models/roberta/chinese-roberta-wwm-ext-OCNLI-2021-01-05-23-46-02-975289",
        num_labels=3,
    )  # for normal
    model = AutoModelForSequenceClassification.from_pretrained(
        "./models/roberta/chinese-roberta-wwm-ext-OCNLI-2021-01-05-23-46-02-975289",
        config=config,
    )
    model_wrapper = HuggingFaceModelWrapper(model, tokenizer, batch_size=24)  # for normal

    # Shared masked language model used by all three mask-then-infill transformations.
    # shared_masked_lm = BertModel.from_pretrained(
    #     "bert-base-chinese"
    # )
    shared_masked_lm = AutoModelForMaskedLM.from_pretrained("bert-base-chinese")  # for mask!!!
    shared_tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
    transformation = CompositeTransformation(
        [
            WordSwapMaskedLM(
                method="bae",
                masked_language_model=shared_masked_lm,
                tokenizer=shared_tokenizer,
                max_candidates=5,
                min_confidence=5e-4,
            ),
            WordInsertionMaskedLM(
                masked_language_model=shared_masked_lm,
                tokenizer=shared_tokenizer,
                max_candidates=5,
                min_confidence=0.0,
            ),
            WordMergeMaskedLM(
                masked_language_model=shared_masked_lm,
                tokenizer=shared_tokenizer,
                max_candidates=5,
                min_confidence=5e-3,
            ),
        ]
    )

    # Goal function.
    goal_function = UntargetedClassification(model_wrapper)

    # Constraints: a mixed Chinese/English stopword list.
    stopwords = set(
        [
            "个", "关于", "之上", "across", "之后", "afterwards", "再次", "against",
            "ain", "全部", "几乎", "单独", "along", "早已", "也", "虽然", "是",
            "among", "amongst", "一个", "和", "其他", "任何", "anyhow", "任何人",
            "anything", "anyway", "anywhere", "are", "aren", "没有", "around", "as",
            "at", "后", "been", "之前", "beforehand", "behind", "being", "below",
            "beside", "besides", "之間", "beyond", "皆是", "但", "by", "可以",
            "不可以", "是", "不是", "couldn't", "d", "didn", "didn't", "doesn",
            "doesn't", "don", "don't", "down", "due", "之間", "either", "之外",
            "elsewhere", "空", "足夠", "甚至", "ever", "任何人", "everything",
            "everywhere", "except", "first", "for", "former", "formerly", "from",
            "hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "he", "hence",
            "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers",
            "herself", "him", "himself", "his", "how", "however", "hundred", "i",
            "if", "in", "indeed", "into", "is", "isn", "isn't", "it", "it's", "its",
            "itself", "just", "latter", "latterly", "least", "ll", "may", "me",
            "meanwhile", "mightn", "mightn't", "mine", "more", "moreover", "most",
            "mostly", "must", "mustn", "mustn't", "my", "myself", "namely", "needn",
            "needn't", "neither", "never", "nevertheless", "next", "no", "nobody",
            "none", "noone", "nor", "not", "nothing", "now", "nowhere", "o", "of",
            "off", "on", "once", "one", "only", "onto", "or", "other", "others",
            "otherwise", "our", "ours", "ourselves", "out", "over", "per", "please",
            "s", "same", "shan", "shan't", "she", "she's", "should've", "shouldn",
            "shouldn't", "somehow", "something", "sometime", "somewhere", "such",
            "t", "than", "that", "that'll", "the", "their", "theirs", "them",
            "themselves", "then", "thence", "there", "thereafter", "thereby",
            "therefore", "therein", "thereupon", "these", "they", "this", "those",
            "through", "throughout", "thru", "thus", "to", "too", "toward",
            "towards", "under", "unless", "until", "up", "upon", "used", "ve",
            "was", "wasn", "wasn't", "we", "were", "weren", "weren't", "what",
            "whatever", "when", "whence", "whenever", "where", "whereafter",
            "whereas", "whereby", "wherein", "whereupon", "wherever", "whether",
            "which", "while", "whither", "who", "whoever", "whole", "whom",
            "whose", "why", "with", "within", "without", "won", "won't", "would",
            "wouldn", "wouldn't", "y", "yet", "you", "you'd", "you'll", "you're",
            "you've", "your", "yours", "yourself", "yourselves",
        ]
    )
    # Pass the custom stopword list so it is actually used by the constraint.
    constraints = [RepeatModification(), StopwordModification(stopwords=stopwords)]
    # input_column_modification = InputColumnModification(
    #     ["premise", "hypothesis"], {"premise"}
    # )
    # constraints.append(input_column_modification)
    # constraints.append(WordEmbeddingDistance(min_cos_sim=0.5))
    use_constraint = UniversalSentenceEncoder(
        threshold=0.7,
        metric="cosine",
        compare_against_original=True,
        window_size=15,
        skip_text_shorter_than_window=True,
    )
    constraints.append(use_constraint)
    # constraints = [
    #     MaxWordsPerturbed(5),
    # ]

    # Transformation alternatives left from experimentation:
    # transformation = WordSwapMaskedLM(method="bae", max_candidates=50, min_confidence=0.0)
    # transformation = WordSwapEmbedding(max_candidates=10)
    # transformation = WordDeletion()

    # Search methods.
    # search_method = GreedyWordSwapWIR(wir_method="delete")
    search_method = GreedySearch()

    textattack.shared.utils.set_seed(args.random_seed)
    attack = Attack(goal_function, constraints, transformation, search_method)
    # attack = parse_attack_from_args(args)
    if gpu_id == 0:
        print(attack, "\n")

    # Drain the input queue: each item is (index, text, ground-truth output).
    while not in_queue.empty():
        try:
            i, text, output = in_queue.get()
            results_gen = attack.attack_dataset([(text, output)])
            result = next(results_gen)
            out_queue.put((i, result))
        except Exception as e:
            out_queue.put(e)
            exit()
# Import the dataset.
# Greedy search attack.
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import (
    RepeatModification,
    StopwordModification,
)
from textattack.shared import Attack
from textattack.loggers import FileLogger  # tracks a dataframe for us
from textattack.attack_results import SuccessfulAttackResult

# We're going to use our LoveWordSwap class as the attack transformation.
transformation = LoveWordSwap()
# We'll constrain modification of already-modified indices and stopwords.
constraints = [RepeatModification(), StopwordModification()]
# We'll use the greedy search method.
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
print(attack)

from textattack.datasets import HuggingFaceNlpDataset

dataset = HuggingFaceNlpDataset("ag_news", None, "test")
print(dataset)

# results_iterable = attack.attack_dataset(dataset)
#
# logger = FileLogger(filename=cwd + '/data/processed/attack_logger.csv')
#
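# --- Sketch: running the attack and logging results --------------------------
# A hedged sketch that completes the commented-out loop above: run the attack
# over the dataset, log every result with FileLogger, and count successes
# (which is why SuccessfulAttackResult is imported). The output path is
# illustrative, not a path from the original project.
results_iterable = attack.attack_dataset(dataset)
logger = FileLogger(filename="attack_log.csv")

num_successes = 0
for attack_result in results_iterable:
    # FileLogger writes each attack result to the file given above.
    logger.log_attack_result(attack_result)
    if isinstance(attack_result, SuccessfulAttackResult):
        num_successes += 1

logger.flush()
print(f"{num_successes} successful attacks logged.")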