def transform(model_name, experiment, tokens):
    """Load a trained pipeline and apply its fitted TF-IDF vectorizer to `tokens`."""
    model_path = os.path.join(get_experiment_path(experiment), 'models',
                              f'{model_name}.pkl')
    # The SVM pickle apparently needs latin1 decoding (typical for pickles
    # written under Python 2); the default 'ASCII' works for the others.
    encoding = 'latin1' if model_name == 'svm' else 'ASCII'
    with open(model_path, 'rb') as f:
        pipeline = pickle.load(f, encoding=encoding)
    tfidf = pipeline.named_steps['tfidf']
    return tfidf.transform(tokens)
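A minimal usage sketch; the experiment member (Experiments.TOTO) and the example text are assumptions, and the input must be an iterable of preprocessed tweet strings because the TfidfVectorizer was fitted on such strings:

# Hedged sketch: Experiments.TOTO and the example text are assumptions.
vectors = transform('svm', Experiments.TOTO, ['example preprocessed tweet'])
print(vectors.shape)  # sparse matrix of shape (n_tweets, vocabulary_size)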
def load_features_and_scores(model, ex_method, experiment):
    """Load explanation features and their importance scores for one model/method pair."""
    path = os.path.join(get_experiment_path(experiment), 'explanations')
    if ex_method == 'builtin':
        # built-in explanations are stored as a single {feature: score} dict
        with open(os.path.join(path, 'features', f'{model}_{ex_method}_all_features.pkl'), 'rb') as f:
            c = pickle.load(f)
        features = list(c.keys())
        scores = list(c.values())
        return features, scores
    else:
        with open(os.path.join(path, 'features', f'{model}_{ex_method}_all_features.pkl'), 'rb') as f:
            features = pickle.load(f)
        with open(os.path.join(path, 'feature_importance', f'{model}_{ex_method}_all_scores.pkl'), 'rb') as f:
            scores = pickle.load(f)
        # if 'bert' in model:
        #     with open(os.path.join(path, 'features', f'xgboost_lime_all_features.pkl'), 'rb') as f:
        #         tweet_count = len(pickle.load(f))
        #     _f = ['_' for _ in range(tweet_count)]
        #     _s = [[0] for _ in range(tweet_count)]
        #     tweet_ids = get_explainable_tweet_ids()
        #     for f, s, i in zip(features, scores, tweet_ids):
        #         _f[i] = f
        #         _s[i] = s
        #     features = _f
        #     scores = _s
    return [f.split() for f in features], scores
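A usage sketch for the non-builtin branch; 'svm' and 'lime' are assumed name values that follow the f'{model}_{ex_method}_all_features.pkl' naming convention above:

# Hedged sketch: 'svm', 'lime' and Experiments.TOTO are assumptions.
features, scores = load_features_and_scores('svm', 'lime', Experiments.TOTO)
# in this branch, features is a list of token lists (one per tweet) and
# scores holds the matching importance values
print(features[0], scores[0])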
Example #3
def get_fastbert_model(experiment):
    """Rebuild the fast-bert learner from the model saved for `experiment`."""
    data_path = os.path.join(get_repo_path(), '_data', 'as_csv',
                             experiment.name)

    databunch = BertDataBunch(data_path,
                              data_path,
                              tokenizer='bert-base-uncased',
                              train_file='train.csv',
                              val_file='val.csv',
                              label_file='labels.csv',
                              text_col='text',
                              label_col='label',
                              batch_size_per_gpu=8,
                              max_seq_length=512,
                              multi_gpu=True,
                              multi_label=False,
                              model_type='bert')

    fastbert = BertLearner.from_pretrained_model(
        databunch,
        pretrained_path=os.path.join(get_experiment_path(experiment), 'models',
                                     'fastbert'),
        metrics=[{
            'name': 'accuracy',
            'function': accuracy
        }],
        device=torch.device("cuda"),
        logger=logging.getLogger(),
        output_dir='output')
    return fastbert
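A hedged usage sketch of the returned learner; predict_batch is the fast-bert BertLearner call also used in train() below, which returns per text a list of (label, probability) pairs sorted by probability:

learner = get_fastbert_model(Experiments.TOTO)  # Experiments.TOTO is an assumption
predictions = learner.predict_batch(['example tweet text'])
top_label = predictions[0][0][0]  # highest-probability label for the first text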
def which_classes_are_explainable(k=5):
    """Plot, per experiment, the share of explainable tweets that are sexist."""
    labels = ['TOTO', 'TMTO', 'TOTM', 'TMTM']  # one bar per experiment, in iteration order
    explainable_sexist = np.array([])
    explainable_non_sexist = np.array([])
    for experiment in Experiments:
        with open(
                os.path.join(get_experiment_path(experiment),
                             f'explainable_tweets_k{k}.pkl'), 'rb') as f:
            explainable = pickle.load(f)
        explainable_sexist = np.append(
            explainable_sexist, len([e for e in explainable if e.label == 1]))
        explainable_non_sexist = np.append(
            explainable_non_sexist,
            len([e for e in explainable if e.label == 0]))
    explainable_sexist = explainable_sexist / (explainable_sexist +
                                               explainable_non_sexist)
    red, green = [plt.cm.Reds(0.5), plt.cm.Greens(0.4)]
    plt.bar(labels, [1] * len(explainable_sexist),
            color=green,
            label='Non-Sexist')
    plt.bar(labels, explainable_sexist, color=red, label='Sexist')
    plt.ylabel('Proportion of explainable tweets')
    plt.title('Which classes are explainable?')
    plt.legend()
    current_fig = plt.gcf()
    plt.show()
    # persist the figure alongside the other evaluation graphs
    current_fig.savefig(
        os.path.join(get_repo_path(), '_evaluation', 'graphs',
                     'which_classes_are_explainable.png'))
Example #5
        logging.info(f"# Start Experiment {i + 1}/{number_of_experiments}: {experiment.name} #")
        ud = UnsexData(experiment)

        logging.info(f'Experiment {i + 1}/{number_of_experiments}: Load data..')
        X_train, X_test, y_train, y_test = ud.get_preprocessed_data()
        # ud.save_as_csv()

        logging.info(f'Experiment {i + 1}/{number_of_experiments}: Start training..')
        train_all(X_train, X_test, y_train, y_test, experiment=experiment)

        logging.info(f'Experiment {i + 1}/{number_of_experiments}: Start explaining..')
        explain_all(X_train, X_test, experiment=experiment)

        # save used training data
        logging.info(f'Experiment {i + 1}/{number_of_experiments}: Saving used data..')
        path = os.path.join(get_experiment_path(experiment), 'used_data')
        for filename, data in [('X_train.pkl', X_train), ('X_test.pkl', X_test),
                               ('X_test_raw.pkl', ud.get_raw_test_tweets()),
                               ('y_train.pkl', y_train), ('y_test.pkl', y_test)]:
            with open(os.path.join(path, filename), 'wb') as f:
                pickle.dump(data, f)

        # create pickle with ExplainableTweets for a configured set of tweets
        # ALL_TWEETS = range(len(X_test))
        JUST_WITH_LIME_EXP = get_explainable_tweet_ids(experiment)
        for k in ks:
            logging.info(f'Experiment {i + 1}/{number_of_experiments}: Saving ExplainableTweets..')

            tweet_loader = TweetLoader(experiment)
            explanation_loader = ExplanationLoader(experiment, tweet_loader=tweet_loader)
            trained_model_loader = TrainedModelLoader(experiment)
def which_datasets_are_explainable(k=5):
    """Plot, per experiment, how the explainable tweets split across source datasets."""
    labels = ['TOTO', 'TMTO', 'TOTM', 'TMTM']  # one bar per experiment, in iteration order
    dataset_names = ['benevolent', 'hostile', 'other', 'callme', 'scales']
    proportions = {name: np.array([]) for name in dataset_names}
    for experiment in tqdm(Experiments):
        with open(
                os.path.join(get_experiment_path(experiment),
                             f'explainable_tweets_k{k}.pkl'), 'rb') as f:
            explainable = pickle.load(f)
        # classify every tweet once instead of re-scanning the list per dataset,
        # and normalize by this experiment's own tweet count so each bar stacks to 1
        datasets = [_get_dataset_of_tweet(e) for e in explainable]
        for name in dataset_names:
            proportions[name] = np.append(proportions[name],
                                          datasets.count(name) / len(explainable))
    explainable_b = proportions['benevolent']
    explainable_h = proportions['hostile']
    explainable_o = proportions['other']
    explainable_c = proportions['callme']
    explainable_s = proportions['scales']

    c1, c2, c3, c4, c5 = plt.cm.Set1.colors[:5]
    plt.bar(labels, [1] * len(explainable_b), color=c1, label='benevolent')
    plt.bar(labels,
            explainable_h + explainable_o + explainable_c + explainable_s,
            color=c2,
            label='hostile')
    plt.bar(labels,
            explainable_o + explainable_c + explainable_s,
            color=c3,
            label='other')
    plt.bar(labels, explainable_c + explainable_s, color=c4, label='callme')
    plt.bar(labels, explainable_s, color=c5, label='scales')
    plt.ylabel('Proportion of explainable tweets')
    plt.title('Which datasets are explainable?')
    plt.legend()
    current_fig = plt.gcf()
    plt.show()
    current_fig.savefig(
        os.path.join(get_repo_path(), '_evaluation', 'graphs',
                     'which_datasets_are_explainable.png'))
def get_f1_score(model_name, experiment):
    df = pd.read_csv(
        os.path.join(get_experiment_path(experiment), 'reports',
                     f'{model_name}.csv'))
    # row 4 of the transposed classification report is the 'weighted avg' row
    return round(df.iloc[4]['f1-score'], 2)
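The iloc indices in get_f1_score and _get_accuracy below rely on the row order of the report written by train(); a sketch of that assumed layout:

# Assumed row layout of '{model_name}.csv' (classification_report with
# output_dict=True, transposed):
#   iloc[0] non-sexist, iloc[1] sexist, iloc[2] accuracy,
#   iloc[3] macro avg, iloc[4] weighted avg
f1 = get_f1_score('svm', Experiments.TOTO)  # weighted-average F1; names are assumptions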
def which_length_is_explainable(k=5):
    """Compare token-length distributions of explainable vs. unexplainable tweets."""
    fig = plt.figure()
    for i, experiment in enumerate(Experiments):
        graph = fig.add_subplot(2, 2, i + 1)
        with open(
                os.path.join(get_experiment_path(experiment), 'used_data',
                             'X_test_raw.pkl'), 'rb') as f:
            all_raw = pickle.load(f)
        with open(
                os.path.join(get_experiment_path(experiment), 'used_data',
                             'X_test.pkl'), 'rb') as f:
            all_tokens = pickle.load(f)
        with open(
                os.path.join(get_experiment_path(experiment),
                             f'explainable_tweets_k{k}.pkl'), 'rb') as f:
            explainable = pickle.load(f)
            explainable_raw = [et.raw for et in explainable]
            explainable_tokens = [et.tokens for et in explainable]
        not_explainable_raw = np.setdiff1d(all_raw, explainable_raw)
        not_explainable_tokens = np.setdiff1d(all_tokens, explainable_tokens)
        # collect length statistics for inspection (not used by the plot itself)
        result = {}
        result['all_raw'] = {}
        result['all_raw']['amount'] = len(all_raw)

        result['explainable'] = {}
        result['explainable']['amount'] = len(explainable_raw)
        result['explainable']['min_tokens'] = min(
            [len(t.split()) for t in explainable_tokens])
        result['explainable']['max_tokens'] = max(
            [len(t.split()) for t in explainable_tokens])
        result['explainable']['min_raw_length'] = min(
            [len(t) for t in explainable_raw])
        result['explainable']['max_raw_length'] = max(
            [len(t) for t in explainable_raw])

        xs = np.array(range(len(explainable_tokens))) / len(explainable_tokens)
        exp_ys = sorted([len(t.split()) for t in explainable_tokens])
        graph.plot(xs, exp_ys, label='explainable')

        result['not_explainable'] = {}
        result['not_explainable']['amount'] = len(not_explainable_raw)
        result['not_explainable']['min_tokens'] = min(
            [len(t.split()) for t in not_explainable_tokens])
        result['not_explainable']['max_tokens'] = max(
            [len(t.split()) for t in not_explainable_tokens])
        result['not_explainable']['min_raw_length'] = min(
            [len(t) for t in not_explainable_raw])
        result['not_explainable']['max_raw_length'] = max(
            [len(t) for t in not_explainable_raw])
        xs = np.array(range(
            len(not_explainable_tokens))) / len(not_explainable_tokens)
        unexp_ys = sorted([len(t.split()) for t in not_explainable_tokens])

        ttest_p = round(ttest_ind(exp_ys, unexp_ys).pvalue, 4)
        mwu_p = round(mannwhitneyu(exp_ys, unexp_ys).pvalue, 4)
        print(f"\n{experiment.name} T-Test, P-Value: ", ttest_p)
        print(f"{experiment.name} Mann-Whitney U Test: P-Value: ", mwu_p)

        graph.set_xlabel(f'$tweets$')
        graph.set_ylabel('$number\_of\_tokens$')

        graph.text(0.48,
                   1.5,
                   f'T-Test: p={ttest_p}\nMWU-Test: p={mwu_p}',
                   fontsize=8)

        if i + 1 in [1, 2]:
            graph.axes.xaxis.set_visible(False)
        if i + 1 in [2, 4]:
            graph.axes.yaxis.set_visible(False)

        graph.plot(xs, unexp_ys, label='unexplainable')
        graph.set_title(experiment.name, fontdict={'fontsize': 10})

    plt.legend()
    current_fig = plt.gcf()
    plt.show()
    current_fig.savefig(
        os.path.join(get_repo_path(), '_evaluation', 'graphs',
                     'which_length_is_explainable.png'))
Example #9
def _get_accuracy(model_name, experiment):
    df = pd.read_csv(
        os.path.join(get_experiment_path(experiment), 'reports',
                     f'{model_name}.csv'))
    # row 2 is the 'accuracy' row; classification_report's scalar accuracy is
    # broadcast across every column there, so the 'precision' cell holds it
    return round(df.iloc[2]['precision'], 2)
    def get_explainable_tweets(self, k=5):
        with open(
                os.path.join(get_experiment_path(self.experiment),
                             f'explainable_tweets_k{k}.pkl'), 'rb') as f:
            explainable_tweets = pickle.load(f)
        return explainable_tweets
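The enclosing class of this method is not part of the excerpt; a hedged sketch assuming it lives on a loader such as the TweetLoader constructed above:

loader = TweetLoader(experiment)             # assumption: host class
tweets = loader.get_explainable_tweets(k=5)
print(len(tweets), tweets[0].label)          # ExplainableTweets carry .label, .raw, .tokens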
def train(model,
          X_train,
          X_test,
          y_train,
          y_test,
          save_as=None,
          report=None,
          experiment=None):
    """
    Args:
        model (Model object): model to train
        X_train: Training data
        X_test: Test data
        y_train: Training labels
        y_test: Test labels
        save_as (String): if set model will be saved with this string as filename after training
        report (String): if set this method saves the classification report a csv file with that name

    Returns:
        classification report as pandas DataFrame (optional)
    """
    if model == 'fastbert':
        # fastbert
        # if save_as:
        #     model.fit(X_train, y_train, model_saving_path=os.path.join('models', f'{save_as}.bin'))
        # else:
        #     model.fit(X_train, y_train)
        # y_pred = []
        # for tweet in X_test:
        #     y_pred.append(model(tweet, speed=0.7)[0])

        # fast-bert
        # no training needed; already trained in google colab due to RAM issues
        model = get_fastbert_model(experiment)
        predictions = model.predict_batch(X_test)
        # each prediction is a list of (label, probability) pairs sorted by
        # probability; keep the top label per tweet and cast it to int
        y_pred = np.array([int(p[0][0]) for p in predictions])
    else:
        pipeline = Pipeline(steps=[('tfidf', TfidfVectorizer()),
                                   ('classifier', model)])

        strtfdKFold = StratifiedKFold(n_splits=10)
        kfold = strtfdKFold.split(X_train, y_train)
        scores = []

        best_pipeline = None
        cross_validation_log = ''
        for k, (train_idx, test_idx) in enumerate(kfold):
            _X_train = [X_train[i] for i in train_idx]
            _X_test = [X_train[i] for i in test_idx]
            _y_train = [y_train[i] for i in train_idx]
            _y_test = [y_train[i] for i in test_idx]
            pipeline.fit(_X_train, _y_train)
            score = pipeline.score(_X_test, _y_test)
            scores.append(score)
            if score >= max(scores):
                # snapshot the best fitted pipeline so far: `pipeline` itself is
                # refit on the next fold, so a plain reference would always end
                # up pointing at the last fold's model (requires `import copy`)
                best_pipeline = copy.deepcopy(pipeline)
            cross_validation_log += 'Fold: %2d, Training/Test Split Distribution: %s, Accuracy: %.3f\n' % (
                k + 1, np.bincount(_y_train), score)

        with open(
                os.path.join(get_experiment_path(experiment), 'models',
                             f'{save_as}_cross_val.txt'), 'w') as f:
            f.write(cross_validation_log)

        # pipeline.fit(X_train, y_train)
        y_pred = best_pipeline.predict(X_test)

        # save svm model trained on unsex data
        if save_as:
            with open(
                    os.path.join(get_experiment_path(experiment), 'models',
                                 f'{save_as}.pkl'), 'wb') as f:
                pickle.dump(best_pipeline, f)
            logging.info(f'Model saved as {save_as}')

    if report:
        # print(confusion_matrix(y_test, y_pred))
        # print(classification_report(y_test, y_pred, target_names=['non-sexist', 'sexist']))
        df = pd.DataFrame(
            classification_report(y_test,
                                  y_pred,
                                  target_names=['non-sexist', 'sexist'],
                                  output_dict=True)).transpose()
        df.to_csv(
            os.path.join(get_experiment_path(experiment), 'reports',
                         f'{report}.csv'))
        return df
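A hedged usage sketch: cross-validate and persist an SVM for one experiment. SVC is an assumption; any sklearn classifier that fits into the Pipeline above would do, and the data variables come from the experiment loop earlier in this listing:

from sklearn.svm import SVC
report_df = train(SVC(kernel='linear'),
                  X_train, X_test, y_train, y_test,
                  save_as='svm', report='svm', experiment=experiment)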