Example #1
def length_count(X, use_clean):
    lens = ["len_{}".format(length) for length in range(10, 110, 10)]
    lens_cnt = defaultdict(list)
    assert use_clean
    for x in X:
        x = filter_stop_words(x)
        x_len = len(x)
        # Cumulative buckets: a length of n is counted in every bucket
        # whose upper bound is at least n.
        for bound in range(10, 110, 10):
            if x_len <= bound:
                lens_cnt["len_{}".format(bound)].append(x_len)
    for size in lens:
        print("{}:{},avg_len:{}".format(size, len(lens_cnt[size]),
                                        int(np.average(lens_cnt[size]))))
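Note: every example on this page passes its token list through filter_stop_words, but the helper itself is not shown here. The following is a minimal sketch of what it presumably does, assuming a list-of-tokens input and NLTK's English stop-word list; the project's actual implementation may differ.

# Minimal sketch (assumption): remove English stop words from a token list.
# Requires the NLTK stop-word corpus: nltk.download("stopwords")
from nltk.corpus import stopwords

_STOP_WORDS = set(stopwords.words("english"))

def filter_stop_words(tokens):
    """Return the tokens that are not stop words, preserving order."""
    return [w for w in tokens if w.lower() not in _STOP_WORDS]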
Example #2
def divide_imdb():
    full_data = load_pickle(
        get_path("data/training_data/imdb/full_data_50K.pkl"))
    data = {}
    X = []
    Y = []
    save_path = get_path(DataPath.IMDB.PROCESSED_DATA)
    save_wv_matrix_path = get_path(DataPath.IMDB.WV_MATRIX)
    for x, y in zip(full_data["x"], full_data["y"]):
        x = filter_stop_words(x)
        if len(x) <= 50:
            X.append(x)
            Y.append(y)

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=2020)
    data["train_x"] = X_train
    data["train_y"] = y_train
    data["test_x"], data["test_y"] = X_test, y_test
    data["vocab"] = full_data["vocab"]
    data["classes"] = full_data["classes"]
    data["word_to_idx"] = full_data["word_to_idx"]
    data["idx_to_word"] = full_data["idx_to_word"]

    save_pickle(save_path, data)
    save_pickle(save_wv_matrix_path, full_data["wv_matrix"])
    print("train_size:{},test_size:{}".format(len(y_train), len(y_test)))
Example #3
File: fsa.py  Project: dgl-prc/rnn2automata
 def __init__(self, l1_traces, sentences, k, vocab, rnn, partitioner, use_clean):
     """
     :param l1_traces:
     :param sentences: these sentences should not include stop words if the dataset is real-world
     :param k:
     :param vocab:
     :param rnn:
     :param partitioner:
     :param use_clean:
     """
     self.k = k
     self.use_clean = use_clean
     self.l1_traces = self.fsa_format_L1(l1_traces)
     self.sentences = [filter_stop_words(sent) for sent in sentences] if self.use_clean else sentences
     self.alphabet = filter_stop_words(vocab) if self.use_clean else vocab
     self.states = [START_SYMBOL] + [str(i) for i in range(k)]
     self.rnn = rnn
     self.partitioner = partitioner
     self.trans_func, self.trans_wfunc, self.final_state = self.learn()
Example #4
def train(model, data, params):
    device = params["device"]
    model = model.to(device)
    optimizer = optim.Adadelta(model.parameters(), params["LEARNING_RATE"])
    criterion = nn.CrossEntropyLoss()
    pre_metric_acc = 0
    max_metric_acc = 0
    for e in range(params["EPOCH"]):
        data["train_x"], data["train_y"] = shuffle(data["train_x"],
                                                   data["train_y"])
        i = 0
        model.train()
        for sent, c in zip(data["train_x"], data["train_y"]):
            if params["use_clean"]:
                sent = filter_stop_words(sent)
            label = [data["classes"].index(c)]
            label = torch.LongTensor(label).to(device)
            input_tensor = sent2tensor(sent, params["input_size"],
                                       data["word_to_idx"],
                                       params["WV_MATRIX"], device)
            optimizer.zero_grad()
            output, inner_states = model(input_tensor)
            lasthn = output[0][-1].unsqueeze(0)
            pred = model.h2o(lasthn)
            loss = criterion(pred, label)
            loss.backward()
            optimizer.step()
            if i % 500 == 0:
                print("Train Epoch: {} [{}/{}]\tLoss: {:.6f}".format(
                    e + 1, i + 1, len(data["train_x"]), loss))
            i += 1
        train_acc = test(data, model, params, mode="train", device=device)
        test_acc = test(data, model, params, mode="test", device=device)
        print("{}\tepoch:{}\ttrain_acc:{:.4f}\ttest_acc:{:.4f}".format(
            current_timestamp(), e + 1, train_acc, test_acc))
        metric_acc = (test_acc + train_acc) / 2
        if params["EARLY_STOPPING"] and metric_acc <= pre_metric_acc:
            print("early stopping by dev_acc!")
            break
        else:
            pre_metric_acc = metric_acc
        if metric_acc >= max_metric_acc:
            max_metric_acc = metric_acc
            best_model = copy.deepcopy(model)
            best_model.i2h.flatten_parameters()
    best_train_acc = test(data,
                          best_model,
                          params,
                          mode="train",
                          device=device)
    best_test_acc = test(data, best_model, params, mode="test", device=device)
    last_train_acc = test(data, model, params, mode="train", device=device)
    print("train_acc:{:.4f}, test acc:{:.4f}, last_train_acc:{}".format(
        best_train_acc, best_test_acc, last_train_acc))
    return best_model, best_train_acc, best_test_acc
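Note: train() reads its hyper-parameters from the params dict. The sketch below shows how it might be invoked; the key names are taken verbatim from the code above, while the concrete values and the way model, data, and wv_matrix are obtained are illustrative assumptions.

import torch

# Illustrative params dict; every key is one that train()/test() actually read.
params = {
    "device": "cuda:0" if torch.cuda.is_available() else "cpu",
    "LEARNING_RATE": 1.0,       # passed to optim.Adadelta
    "EPOCH": 20,
    "EARLY_STOPPING": True,
    "use_clean": True,          # filter stop words inside the loop
    "input_size": 300,          # word-vector dimension, as in the other examples
    "WV_MATRIX": wv_matrix,     # pre-trained word-vector matrix, e.g. loaded with load_pickle
}
# model and data are loaded the same way as in Examples #8-#10, e.g.
#   data  = load_pickle(get_path(DataPath.IMDB.PROCESSED_DATA))
#   model = load_model(model_type, "imdb", device=params["device"])
best_model, best_train_acc, best_test_acc = train(model, data, params)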
Example #5
File: fsa.py  Project: dgl-prc/rnn2automata
 def predict(self, sent):
     sent = filter_stop_words(sent) if self.use_clean else sent
     is_unspecified = False
     c_state = START_SYMBOL
     for sigma in sent:
         if sigma in self.trans_func[c_state]:
             c_state = self.trans_func[c_state][sigma]
         else:
             is_unspecified = True
             break
     pred = 1 if c_state in self.final_state else 0
     return pred, is_unspecified
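Note: predict() walks the learned transition function symbol by symbol and reports is_unspecified as soon as a symbol has no outgoing transition from the current state. The toy transition table below is made up purely to illustrate that walk; it is not one extracted by the tool.

# Stand-alone illustration of the walk performed by predict().
START_SYMBOL = "S"
trans_func = {
    "S": {"good": "1", "bad": "0"},
    "1": {"movie": "1"},
    "0": {"movie": "0"},
}
final_state = {"1"}  # accepting states -> predicted label 1

def walk(sent):
    c_state, is_unspecified = START_SYMBOL, False
    for sigma in sent:
        if sigma in trans_func[c_state]:
            c_state = trans_func[c_state][sigma]
        else:
            is_unspecified = True
            break
    return (1 if c_state in final_state else 0), is_unspecified

print(walk(["good", "movie"]))   # (1, False)
print(walk(["great", "movie"]))  # (0, True): "great" is unspecified from S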
Example #6
def format_data(data_type, data, is_train_data):
    """ convert the list into a string.
    data_type:
    data: list(list).
    Return:
        list(string)
    """
    data_source = "train" if is_train_data else "test"

    if is_artificial(data_type):
        X = ["".join(w) for w in data["{}_x".format(data_source)]]
    else:
        X = []
        for w in data["{}_x".format(data_source)]:
            w = filter_stop_words(w)
            if len(w) != 0:
                X.append(MyString(w))
    return X, data["{}_y".format(data_source)]
Example #7
def test(data, model, params, mode="test", device="cuda:0"):
    model.eval()
    if mode == "train":
        X, Y = data["train_x"], data["train_y"]
    elif mode == "test":
        X, Y = data["test_x"], data["test_y"]
    acc = 0
    for sent, c in zip(X, Y):
        if params["use_clean"]:
            sent = filter_stop_words(sent)
        input_tensor = sent2tensor(sent, params["input_size"],
                                   data["word_to_idx"], params["WV_MATRIX"],
                                   device)
        output, _ = model(input_tensor)
        # avg_h = torch.mean(output, dim=1, keepdim=False)
        lasthn = output[0][-1].unsqueeze(0)
        pred = model.h2o(lasthn)
        label = data["classes"].index(c)
        pred = np.argmax(pred.cpu().data.numpy(), axis=1)[0]
        acc += 1 if pred == label else 0
    return acc / len(X)
Example #8
def make_ori_trace(model_type,
                   dataset,
                   device,
                   use_clean=False,
                   path_mode=STANDARD_PATH,
                   model_path=STANDARD_PATH):
    """

    model_type:
    dataset:
    device:
    use_clean: bool if true then filter the stop words, or keep the stop words
    :return:
    """
    input_dim = get_input_dim(dataset)

    if dataset.startswith("tomita"):
        gram_id = int(dataset[-1])
        data = load_pickle(
            get_path(getattr(DataPath, "TOMITA").PROCESSED_DATA).format(
                gram_id, gram_id))
        wv_matrix = load_pickle(
            get_path(getattr(DataPath,
                             "TOMITA").WV_MATRIX).format(gram_id, gram_id))
        model = load_model(model_type,
                           "tomita",
                           device=device,
                           load_model_path=model_path)
    else:
        model = load_model(model_type,
                           dataset,
                           device=device,
                           load_model_path=model_path)
        data = load_pickle(
            get_path(getattr(DataPath, dataset.upper()).PROCESSED_DATA))
        wv_matrix = load_pickle(
            get_path(getattr(DataPath, dataset.upper()).WV_MATRIX))

    word2idx = data["word_to_idx"]
    ori_traces = {}
    ori_traces["train_x"] = []
    ori_traces["test_x"] = []
    ori_traces["train_pre_y"] = []
    ori_traces["test_pre_y"] = []
    print("do extracting...")
    for x in data["train_x"]:
        if use_clean:
            x = filter_stop_words(x)
        tensor_sequence = sent2tensor(x, input_dim, word2idx, wv_matrix,
                                      device)
        hn_trace, label_trace = model.get_predict_trace(tensor_sequence)
        ori_traces["train_x"].append(hn_trace)
        ori_traces["train_pre_y"].append(label_trace[-1])

    for x in data["test_x"]:
        if use_clean:
            x = filter_stop_words(x)
        tensor_sequence = sent2tensor(x, input_dim, word2idx, wv_matrix,
                                      device)
        hn_trace, label_trace = model.get_predict_trace(tensor_sequence)
        ori_traces["test_x"].append(hn_trace)
        ori_traces["test_pre_y"].append(label_trace[-1])

    if path_mode == STANDARD_PATH:
        save_path = get_path(
            getattr(getattr(OriTrace, model_type.upper()), dataset.upper()))
    else:
        save_path = path_mode

    save_pickle(save_path, ori_traces)
    print("Saved to {}".format(save_path))
Example #9
def prepare_L1_data(model_type,
                    data_type,
                    device,
                    partitioner,
                    pt_type,
                    bug_mode,
                    model_pth=STANDARD_PATH,
                    adv_path=STANDARD_PATH,
                    use_clean=False):
    if adv_path == STANDARD_PATH:
        adv_path = get_path(
            getattr(getattr(Application.AEs, data_type.upper()),
                    model_type.upper()).format(bug_mode))
    else:
        adv_path = get_path(adv_path)
    input_dim = 300
    ######################
    # load data and model
    #####################
    raw_data = load_pickle(
        get_path(getattr(DataPath, data_type.upper()).PROCESSED_DATA))
    adv_data = load_pickle(adv_path)
    wv_matrix = load_pickle(
        get_path(getattr(DataPath, data_type.upper()).WV_MATRIX))
    word2idx = raw_data["word_to_idx"]
    model = load_model(model_type, data_type, device, model_pth)

    ####################
    # extract ori trace
    ###################
    benign_traces = []
    adv_traces = []
    benign_labels = []
    adv_labels = []
    for ele in adv_data:
        idx, adv_x, adv_y = ele
        benign_x, benign_y = raw_data["test_x"][idx], raw_data["test_y"][idx]
        if use_clean:
            # Note: in this case the adversarial example is derived from the clean text, so no filtering is needed.
            benign_x = filter_stop_words(benign_x)

        # benign
        benign_tensor = sent2tensor(benign_x, input_dim, word2idx, wv_matrix,
                                    device)
        benign_hn_trace, benign_label_trace = model.get_predict_trace(
            benign_tensor)
        benign_traces.append(benign_hn_trace)
        assert benign_y == benign_label_trace[-1]
        benign_labels.append(benign_y)

        # adv
        adv_tensor = sent2tensor(adv_x, input_dim, word2idx, wv_matrix, device)
        adv_hn_trace, adv_label_trace = model.get_predict_trace(adv_tensor)
        adv_traces.append(adv_hn_trace)
        assert adv_y == adv_label_trace[-1]
        adv_labels.append(adv_y)

    #############################
    # make level1 abstract traces
    #############################
    if pt_type == PartitionType.KMP:
        rnn = load_model(model_type, data_type, "cpu")
    else:
        rnn = None
    benign_abs_seqs = level1_abstract(rnn=rnn,
                                      rnn_traces=benign_traces,
                                      y_pre=benign_labels,
                                      partitioner=partitioner,
                                      partitioner_exists=True,
                                      partition_type=pt_type)
    adv_abs_seqs = level1_abstract(rnn=rnn,
                                   rnn_traces=adv_traces,
                                   y_pre=adv_labels,
                                   partitioner=partitioner,
                                   partitioner_exists=True,
                                   partition_type=pt_type)

    return benign_abs_seqs, adv_abs_seqs
Example #10
def main(model_type,
         data_type,
         word2vec_model,
         check_p=-1,
         check_point_path=None,
         bug_mode=TextBugger.SUB_W,
         model_path=STANDARD_PATH,
         save_path=STANDARD_PATH,
         use_clean=False):
    max_size = 1000
    device = "cpu"
    input_dim = 300
    # omit_stopws = True if model_path != STANDARD_PATH else False
    model = load_model(model_type, data_type, device, model_path)
    data = load_pickle(
        get_path(getattr(DataPath, data_type.upper()).PROCESSED_DATA))
    WV_MATRIX = load_pickle(
        get_path(getattr(DataPath, data_type.upper()).WV_MATRIX))

    classifier = Classifier(model, model_type, input_dim, data["word_to_idx"],
                            WV_MATRIX, device)
    benign_data, benign_idx = select_benign_data(classifier, data)
    textbugger = TextBugger(classifier, word2vec_model, bug_mode)
    print("begin attacking....")
    adv_rst = []
    if check_p > 0:
        check_point_data = load_pickle(check_point_path)
        adv_rst = check_point_data["ADV_RST"]
        p = check_p
        benign_data, benign_idx = benign_data[p:], benign_idx[p:]
    else:
        p = 0
    check_point_folder = make_check_point_folder("attack_{}".format(data_type),
                                                 dataset=data_type,
                                                 modelType=model_type)
    # Note: the order of the data is fixed because the order of the processed raw data is fixed.
    # In our experiments, 1000 adversarial samples are enough.
    for item, idx in zip(benign_data, benign_idx):
        try:
            sentence, label = item
            if use_clean:
                sentence = filter_stop_words(sentence)
            if classifier.get_label(sentence) == label:
                newSent, newLabel = textbugger.attack(sentence)
                if newLabel != -1:
                    adv_rst.append((idx, newSent, newLabel))
                    p += 1
                    if p >= max_size:
                        break
        except UnicodeEncodeError as e:
            continue
        sys.stdout.write("\rattacking   {:.2f}%".format(100 * p / max_size))
        sys.stdout.flush()
        if p % 50 == 0:
            check_point_data = {"ADV_RST": adv_rst}
            save_pickle(
                os.path.join(check_point_folder,
                             "check_point-{}.pkl".format(p)), check_point_data)
    if save_path == STANDARD_PATH:
        save_path = getattr(getattr(Application.AEs, data_type.upper()),
                            model_type.upper())
        save_path = get_path(save_path.format(bug_mode))

    save_pickle(save_path, adv_rst)
    print("Done! saved in {}".format(save_path))