def run_experiment(dataset, algorithm, explainer):
  evaluator = ExplanationEvaluator(classifier_names=[algorithm])
  evaluator.load_datasets([dataset])
  evaluator.vectorize_and_train()
  explain_fn = None


  print('Explainer:', explainer)
  if explainer == 'lime':
    # Exponential kernel of width rho, same as the other LIME setups in this repo.
    rho = 25
    kernel = lambda d: np.sqrt(np.exp(-(d ** 2) / rho ** 2))
    explainer = explainers.GeneralizedLocalExplainer(
        kernel,
        explainers.data_labels_distances_mapping_text,
        num_samples=15000,
        return_mean=False,
        verbose=False,
        return_mapped=True)
    explain_fn = explainer.explain_instance
  elif explainer == 'parzen':
    # Parzen window sigmas per dataset and classifier, found through cross-validation.
    sigmas = {
        'multi_polarity_electronics': {'tree': 0.5, 'l1logreg': 1},
        'multi_polarity_kitchen': {'tree': 0.75, 'l1logreg': 2.0},
        'multi_polarity_dvd': {'tree': 8.0, 'l1logreg': 1},
        'multi_polarity_books': {'tree': 2.0, 'l1logreg': 2.0}}

    explainer = parzen_windows.ParzenWindowClassifier()
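    # Fit the Parzen window explainer on the classifier's cross-validated predictions,
    # so it approximates the model rather than the true training labels.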
    cv_preds = cross_val_predict(evaluator.classifiers[dataset][algorithm],
                                 evaluator.train_vectors[dataset],
                                 evaluator.train_labels[dataset])
    explainer.fit(evaluator.train_vectors[dataset], cv_preds)
    explainer.sigma = sigmas[dataset][algorithm]
    explain_fn = explainer.explain_instance
  elif explainer == 'greedy':
    explain_fn = explainers.explain_greedy
  elif explainer == 'random':
    explainer = explainers.RandomExplainer()
    explain_fn = explainer.explain_instance
  train_results, test_results = evaluator.measure_explanation_hability(explain_fn)
  out = {'train': train_results[dataset][algorithm], 'test' : test_results[dataset][algorithm]}
  # Return mean of recalls and individual train and test recalls
  recall = np.mean(test_results[dataset][algorithm])
  print('Recall:', recall)
  return recall, out
def main():
    parser = argparse.ArgumentParser(description='Evaluate some explanations')
    parser.add_argument('--dataset',
                        '-d',
                        type=str,
                        required=True,
                        help='dataset name')
    parser.add_argument('--algorithm',
                        '-a',
                        type=str,
                        required=True,
                        help='algorithm_name')
    parser.add_argument('--explainer',
                        '-e',
                        type=str,
                        required=True,
                        help='explainer name')
    args = parser.parse_args()
    dataset = args.dataset
    algorithm = args.algorithm
    evaluator = ExplanationEvaluator(classifier_names=[algorithm])
    evaluator.load_datasets([dataset])
    evaluator.vectorize_and_train()
    explain_fn = None
    if args.explainer == 'lime':
        rho = 25
        kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho**2))
        explainer = explainers.GeneralizedLocalExplainer(
            kernel,
            explainers.data_labels_distances_mapping_text,
            num_samples=15000,
            return_mean=False,
            verbose=False,
            return_mapped=True)
        explain_fn = explainer.explain_instance
    elif args.explainer == 'parzen':
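        # Parzen window sigmas per dataset and classifier, found through cross-validation.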
        sigmas = {
            'multi_polarity_electronics': {
                'tree': 0.5,
                'l1logreg': 1
            },
            'multi_polarity_kitchen': {
                'tree': 0.75,
                'l1logreg': 2.0
            },
            'multi_polarity_dvd': {
                'tree': 8.0,
                'l1logreg': 1
            },
            'multi_polarity_books': {
                'tree': 2.0,
                'l1logreg': 2.0
            }
        }

        explainer = parzen_windows.ParzenWindowClassifier()
        cv_preds = sklearn.cross_validation.cross_val_predict(
            evaluator.classifiers[dataset][algorithm],
            evaluator.train_vectors[dataset], evaluator.train_labels[dataset])
        explainer.fit(evaluator.train_vectors[dataset], cv_preds)
        explainer.sigma = sigmas[dataset][algorithm]
        explain_fn = explainer.explain_instance
    elif args.explainer == 'greedy':
        explain_fn = explainers.explain_greedy
    elif args.explainer == 'random':
        explainer = explainers.RandomExplainer()
        explain_fn = explainer.explain_instance
    train_results, test_results = evaluator.measure_explanation_hability(
        explain_fn)
    print('Average test: ', np.mean(test_results[dataset][algorithm]))
    out = {
        'train': train_results[dataset][algorithm],
        'test': test_results[dataset][algorithm]
    }
    print(out)
def main():
  parser = argparse.ArgumentParser(description='Evaluate some explanations')
  parser.add_argument('--dataset', '-d', type=str, required=True, help='dataset name')
  parser.add_argument('--algorithm1', '-a1', type=str, required=True, help='first algorithm name')
  parser.add_argument('--algorithm2', '-a2', type=str, required=True, help='second algorithm name')
  parser.add_argument('--num_features', '-k', type=int, required=True, help='num features')
  parser.add_argument('--percent_untrustworthy', '-u', type=float, required=True, help='fraction of untrustworthy features, e.g. 0.1')
  parser.add_argument('--num_rounds', '-r', type=int, required=True, help='num rounds')
  args = parser.parse_args()
  dataset = args.dataset
  train_data, train_labels, test_data, test_labels, class_names = LoadDataset(dataset)

  vectorizer = CountVectorizer(lowercase=False, binary=True) 
  train_vectors = vectorizer.fit_transform(train_data)
  test_vectors = vectorizer.transform(test_data)
  terms = np.array(list(vectorizer.vocabulary_.keys()))
  indices = np.array(list(vectorizer.vocabulary_.values()))
  inverse_vocabulary = terms[np.argsort(indices)]
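  # inverse_vocabulary[j] is the term whose column index in the count matrix is j.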

  np.random.seed(1)
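  # Two independently trained classifiers whose predictions (and explanations) we want to contrast.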
  classifier_a = get_classifier(args.algorithm1, vectorizer)
  classifier_a.fit(train_vectors, train_labels)
  classifier_a_pipeline = make_pipeline(vectorizer, classifier_a)

  classifier_b = get_classifier(args.algorithm2, vectorizer)
  classifier_b.fit(train_vectors, train_labels)
  classifier_b_pipeline = make_pipeline(vectorizer, classifier_b)

  np.random.seed(1)
  untrustworthy_rounds = []
  all_features = range(train_vectors.shape[1])
  num_untrustworthy = int(train_vectors.shape[1] * args.percent_untrustworthy)
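  # For each round, mark a random subset of the vocabulary as 'untrustworthy' features.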
  for _ in range(args.num_rounds):
    untrustworthy_rounds.append(np.random.choice(all_features, num_untrustworthy, replace=False))
  
  rho = 25
  kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho ** 2))

  # simple_LIME = explainers.GeneralizedLocalExplainer(kernel, explainers.data_labels_distances_mapping_text, num_samples=15000, return_mean=True, verbose=False, return_mapped=True)
  LIME = LimeTextExplainer(class_names=class_names, mode="classification") 

  ridge_regressor = Ridge(alpha=1, fit_intercept=True, random_state=0)
  model_regressor = Ridge(alpha=1, fit_intercept=True, random_state=0)
  regressor_requires_positive_values = False

  sigmas = {'multi_polarity_electronics': {'neighbors': 0.75, 'svm': 10.0, 'tree': 0.5,
                                           'logreg': 0.5, 'random_forest': 0.5, 'embforest': 0.75},
            'multi_polarity_kitchen': {'neighbors': 1.0, 'svm': 6.0, 'tree': 0.75,
                                       'logreg': 0.25, 'random_forest': 6.0, 'embforest': 1.0},
            'multi_polarity_dvd': {'neighbors': 0.5, 'svm': 0.75, 'tree': 8.0,
                                   'logreg': 0.75, 'random_forest': 0.5, 'embforest': 5.0},
            'multi_polarity_books': {'neighbors': 0.5, 'svm': 7.0, 'tree': 2.0,
                                     'logreg': 1.0, 'random_forest': 1.0, 'embforest': 3.0},
            '2ng': {'neighbors': 1.0, 'svm': 6.0, 'tree': 0.75,
                    'logreg': 0.25, 'random_forest': 6.0, 'embforest': 1.0}}

  random = explainers.RandomExplainer()
  exps = {}
  explainer_names = ['DiffLIME', 'ContrastLIME', 'random', 'greedy']
  for expl in explainer_names:
    exps[expl] = []

  predictions_a = classifier_a.predict(test_vectors)
  predict_probas_a = classifier_a.predict_proba(test_vectors)[:,1]
  predictions_b = classifier_b.predict(test_vectors)

  disagreements = np.array(predictions_a != predictions_b, dtype=int)
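  # disagreements[i] is 1 when the two classifiers predict different labels for test point i.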

  predict_probas_b = classifier_b.predict_proba(test_vectors)[:,1]

  # Ask LIME for many more features than we keep, so the two per-model explanations
  # cover (roughly) the same vocabulary before we take their difference.
  LARGE_NUM_OF_FEATURES = 200

  for i in range(test_vectors.shape[0]):
    print(i)
    sys.stdout.flush()

    # Doesn't need to change between single-model and contrastive LIME.
    exp = random.explain_instance(test_vectors[i], 1, None, args.num_features, None)
    exps['random'].append(exp)

    # Compute Diff-LIME
    class_exp_a = LIME.explain_instance(test_data[i],
                                      classifier_a_pipeline.predict_proba,
                                      num_features=LARGE_NUM_OF_FEATURES,
                                      model_regressor=ridge_regressor)
    lime_exp_a = [(vectorizer.vocabulary_.get(w, None), weight) for w, weight in class_exp_a.as_list() if w in vectorizer.vocabulary_]
    lime_exp_a_dict = dict(lime_exp_a)
    lime_keys_a = set(lime_exp_a_dict.keys())
    lime_score_a = class_exp_a.score
    class_exp_b = LIME.explain_instance(test_data[i],
                                      classifier_b_pipeline.predict_proba,
                                      num_features=LARGE_NUM_OF_FEATURES,
                                      model_regressor=ridge_regressor)
    lime_exp_b = [(vectorizer.vocabulary_.get(w, None), weight) for w, weight in class_exp_b.as_list() if w in vectorizer.vocabulary_]
    lime_exp_b_dict = dict(lime_exp_b)
    lime_keys_b = set(lime_exp_b_dict.keys())
    lime_score_b = class_exp_b.score
    combined_lime_keys = lime_keys_a.union(lime_keys_b)
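    # Diff-LIME weight for a word = (weight under classifier B) - (weight under classifier A),
    # with a missing word contributing 0.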
    diff_lime_exp = []
    for word_idx in combined_lime_keys:
      lime_difference = lime_exp_b_dict.get(word_idx, 0.0) - lime_exp_a_dict.get(word_idx, 0.0)
      diff_lime_exp.append((word_idx, lime_difference))
    # Sort by difference of LIMEs
    diff_lime_exp.sort(key=lambda x: np.abs(x[1]), reverse=True)
    diff_lime_exp = diff_lime_exp[:args.num_features]
    assert lime_score_a.keys() == lime_score_b.keys()
    diff_lime_score = {k: lime_score_b[k] - lime_score_a[k] for k in lime_score_a}
    exps['DiffLIME'].append((diff_lime_exp, diff_lime_score))
    
    # Compute ContrastLime
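    # explain_instance_contrast appears to be a custom extension used by these experiments
    # (it is not part of the upstream lime package); it takes both classifiers' predict_proba
    # functions and is meant to explain the contrast between them directly.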

    contrastlime_class_exp = LIME.explain_instance_contrast(test_data[i],
                                      classifier_a_pipeline.predict_proba,
                                      classifier_b_pipeline.predict_proba,
                                      num_features=args.num_features,
                                      model_regressor=model_regressor,
                                      regressor_requires_positive_values=regressor_requires_positive_values)
    contrastlime_exp = [(vectorizer.vocabulary_.get(w, None), weight) for w, weight in contrastlime_class_exp.as_list() if w in vectorizer.vocabulary_]
    contrastlime_score = contrastlime_class_exp.score
    exps['ContrastLIME'].append((contrastlime_exp, contrastlime_score))

    exp = explainers.explain_contrast_greedy_martens(test_vectors[i],
                                                     disagreements[i],
                                                     classifier_a.predict_proba,
                                                     classifier_b.predict_proba,
                                                     args.num_features)
    exps['greedy'].append(exp)

  precision = {}
  recall = {}
  f1 = {}

  neg_precision = {}
  neg_recall = {}
  neg_f1 = {}

  macro_precision = {}
  macro_recall = {}
  macro_f1 = {}

  for name in explainer_names:
    precision[name] = []
    recall[name] = []
    f1[name] = []
    neg_precision[name] = []
    neg_recall[name] = []
    neg_f1[name] = []
    macro_precision[name] = []
    macro_recall[name] = []
    macro_f1[name] = []
  flipped_preds_size = []
  for untrustworthy in untrustworthy_rounds:
    t = test_vectors.copy()
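    # Zero out this round's untrustworthy features and check which test points change
    # their disagreement status; those are the predictions an oracle would not trust.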
    t[:, untrustworthy] = 0

    disagreement_predictions_originals = classifier_a.predict(test_vectors) != classifier_b.predict(test_vectors)
    disagreement_predictions_updated = classifier_a.predict(t) != classifier_b.predict(t)
    mistrust_idx = np.argwhere(disagreement_predictions_originals != disagreement_predictions_updated).flatten()

    print('Number of suspect predictions ', len(mistrust_idx))
    shouldnt_trust = set(mistrust_idx)
    flipped_preds_size.append(len(shouldnt_trust))
    mistrust = collections.defaultdict(set)
    trust = collections.defaultdict(set)
    # trust_fn: trust a prediction if the explanation's score stays on the same side of 0.5
    # after the untrustworthy features' contributions are removed.
    # trust_fn_all: trust only if the explanation contains no untrustworthy feature at all.
    trust_fn = lambda prev, curr: (prev > 0.5 and curr > 0.5) or (prev <= 0.5 and curr <= 0.5)
    trust_fn_all = lambda exp, unt: len([x[0] for x in exp if x[0] in unt]) == 0
    for i in range(test_vectors.shape[0]):
      prev_tot = predict_probas_b[i] - predict_probas_a[i]

      exp, mean = exps['DiffLIME'][i]
      assert list(mean.keys()) == [1]
      prev_tot2 = sum([np.abs(x[1]) for x in exp]) + np.abs(mean[1])
      tot = prev_tot2 - sum([np.abs(x[1]) for x in exp if x[0] in untrustworthy])
      if trust_fn(tot, prev_tot):
        trust['DiffLIME'].add(i)
      else:
        mistrust['DiffLIME'].add(i)



      exp, mean = exps['ContrastLIME'][i]
      assert list(mean.keys()) == [1]
      prev_tot2 = sum([np.abs(x[1]) for x in exp]) + np.abs(mean[1])
      tot = prev_tot2 - sum([np.abs(x[1]) for x in exp if x[0] in untrustworthy])
      if trust_fn(tot, prev_tot):
        trust['ContrastLIME'].add(i)
      else:
        mistrust['ContrastLIME'].add(i)


      exp = exps['random'][i]
      if trust_fn_all(exp, untrustworthy):
        trust['random'].add(i)
      else:
        mistrust['random'].add(i)

      exp = exps['greedy'][i]
      if trust_fn_all(exp, untrustworthy):
        trust['greedy'].add(i)
      else:
        mistrust['greedy'].add(i)

    for expl in explainer_names:
      # 'Trust' is the positive class: trusting a prediction that actually flipped is a false positive.
      false_positives = set(trust[expl]).intersection(shouldnt_trust)
      true_positives = set(trust[expl]).difference(shouldnt_trust)
      false_negatives = set(mistrust[expl]).difference(shouldnt_trust)
      true_negatives = set(mistrust[expl]).intersection(shouldnt_trust)

      try:
        prec = len(true_positives) / float(len(true_positives) + len(false_positives))
      except ZeroDivisionError:
        prec = 0
      try:
        rec = float(len(true_positives)) / (len(true_positives) + len(false_negatives))
      except ZeroDivisionError:
        rec = 0
      try:
        neg_prec = len(true_negatives) / float(len(true_negatives) + len(false_negatives))
      except ZeroDivisionError:
        neg_prec = 0
      try:
        neg_rec = float(len(true_negatives)) / (len(true_negatives) + len(false_positives))
      except ZeroDivisionError:
        neg_rec = 0
      precision[expl].append(prec)
      recall[expl].append(rec)
      f1z = 2 * (prec * rec) / (prec + rec) if (prec and rec) else 0
      f1[expl].append(f1z)

      neg_precision[expl].append(neg_prec)
      neg_recall[expl].append(neg_rec)
      neg_f1z = 2 * (neg_prec * neg_rec) / (neg_prec + neg_rec) if (neg_prec and neg_rec) else 0
      neg_f1[expl].append(neg_f1z)

      macro_precision[expl].append(prec)
      macro_recall[expl].append(rec)
      macro_f1[expl].append(f1z)



  print('Average number of flipped predictions:', np.mean(flipped_preds_size), '+-', np.std(flipped_preds_size))
  print('Macro Precision:')
  for expl in explainer_names:
    print(expl, np.mean(macro_precision[expl]), '+-', np.std(macro_precision[expl]), 'pvalue', sp.stats.ttest_ind(macro_precision[expl], macro_precision['ContrastLIME'])[1].round(4))
  print()
  print('Macro Recall:')
  for expl in explainer_names:
    print(expl, np.mean(macro_recall[expl]), '+-', np.std(macro_recall[expl]), 'pvalue', sp.stats.ttest_ind(macro_recall[expl], macro_recall['ContrastLIME'])[1].round(4))
  print()
  print('Macro F1:')
  for expl in explainer_names:
    print(expl, np.mean(macro_f1[expl]), '+-', np.std(macro_f1[expl]), 'pvalue', sp.stats.ttest_ind(macro_f1[expl], macro_f1['ContrastLIME'])[1].round(4))
def run_experiment(df, dataset, algorithm, num_features, percent_untrustworthy,
                   num_rounds):
    train_data, train_labels, test_data, test_labels, class_names = LoadDataset(
        dataset)
    vectorizer = CountVectorizer(lowercase=False, binary=True)
    train_vectors = vectorizer.fit_transform(train_data)
    test_vectors = vectorizer.transform(test_data)
    terms = np.array(list(vectorizer.vocabulary_.keys()))
    indices = np.array(list(vectorizer.vocabulary_.values()))
    inverse_vocabulary = terms[np.argsort(indices)]

    np.random.seed(1)
    classifier = get_classifier(algorithm, vectorizer)
    classifier.fit(train_vectors, train_labels)

    np.random.seed(1)
    untrustworthy_rounds = []
    all_features = range(train_vectors.shape[1])
    num_untrustworthy = int(train_vectors.shape[1] * percent_untrustworthy)
    for _ in range(num_rounds):
        untrustworthy_rounds.append(
            np.random.choice(all_features, num_untrustworthy, replace=False))

    rho = 25
    kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho**2))
    LIME = explainers.GeneralizedLocalExplainer(
        kernel,
        explainers.data_labels_distances_mapping_text,
        num_samples=15000,
        return_mean=True,
        verbose=False,
        return_mapped=True)

    parzen = parzen_windows.ParzenWindowClassifier()
    cv_preds = sklearn.cross_validation.cross_val_predict(classifier,
                                                          train_vectors,
                                                          train_labels,
                                                          cv=5)
    parzen.fit(train_vectors, cv_preds)
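    # Per-dataset, per-classifier Parzen sigmas, found through cross-validation.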
    sigmas = {
        'multi_polarity_electronics': {
            'neighbors': 0.75,
            'svm': 10.0,
            'tree': 0.5,
            'logreg': 0.5,
            'random_forest': 0.5,
            'embforest': 0.75
        },
        'multi_polarity_kitchen': {
            'neighbors': 1.0,
            'svm': 6.0,
            'tree': 0.75,
            'logreg': 0.25,
            'random_forest': 6.0,
            'embforest': 1.0
        },
        'multi_polarity_dvd': {
            'neighbors': 0.5,
            'svm': 0.75,
            'tree': 8.0,
            'logreg': 0.75,
            'random_forest': 0.5,
            'embforest': 5.0
        },
        'multi_polarity_books': {
            'neighbors': 0.5,
            'svm': 7.0,
            'tree': 2.0,
            'logreg': 1.0,
            'random_forest': 1.0,
            'embforest': 3.0
        }
    }
    parzen.sigma = sigmas[dataset][algorithm]

    explainer_names = ['LIME', 'random', 'greedy', 'parzen']

    # This will store the partial results so later it can be saved in "df"
    res = {k: '' for k in ['classifier'] + explainer_names}
    res['classifier'] = algorithm

    random = explainers.RandomExplainer()
    exps = {}
    for expl in explainer_names:
        exps[expl] = []

    predictions = classifier.predict(test_vectors)
    predict_probas = classifier.predict_proba(test_vectors)[:, 1]
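    # P(y = 1) for each test instance; used as the starting point of the trust simulation below.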
    for i in range(test_vectors.shape[0]):
        print(i)
        sys.stdout.flush()
        exp, mean = LIME.explain_instance(test_vectors[i], 1,
                                          classifier.predict_proba,
                                          num_features)
        exps['LIME'].append((exp, mean))
        exp = parzen.explain_instance(test_vectors[i], 1,
                                      classifier.predict_proba, num_features,
                                      None)
        mean = parzen.predict_proba(test_vectors[i])[1]
        exps['parzen'].append((exp, mean))

        exp = random.explain_instance(test_vectors[i], 1, None, num_features,
                                      None)
        exps['random'].append(exp)

        exp = explainers.explain_greedy_martens(test_vectors[i],
                                                predictions[i],
                                                classifier.predict_proba,
                                                num_features)
        exps['greedy'].append(exp)

    precision = {}
    recall = {}
    f1 = {}
    for name in explainer_names:
        precision[name] = []
        recall[name] = []
        f1[name] = []
    flipped_preds_size = []
    for untrustworthy in untrustworthy_rounds:
        t = test_vectors.copy()
        t[:, untrustworthy] = 0
        mistrust_idx = np.argwhere(
            classifier.predict(t) != classifier.predict(test_vectors)).flatten()
        print('Number of suspect predictions', len(mistrust_idx))
        shouldnt_trust = set(mistrust_idx)
        flipped_preds_size.append(len(shouldnt_trust))
        mistrust = collections.defaultdict(set)
        trust = collections.defaultdict(set)
        # trust_fn: trust a prediction if its probability stays on the same side of 0.5
        # after the untrustworthy features' contributions are removed.
        # trust_fn_all: trust only if the explanation contains no untrustworthy feature at all.
        trust_fn = lambda prev, curr: (prev > 0.5 and curr > 0.5) or (prev <= 0.5 and curr <= 0.5)
        trust_fn_all = lambda exp, unt: len([x[0] for x in exp if x[0] in unt]) == 0
        for i in range(test_vectors.shape[0]):
            exp, mean = exps['LIME'][i]
            prev_tot = predict_probas[i]
            prev_tot2 = sum([x[1] for x in exp]) + mean
            tot = prev_tot2 - sum([x[1] for x in exp if x[0] in untrustworthy])
            if trust_fn(tot, prev_tot):
                trust['LIME'].add(i)
            else:
                mistrust['LIME'].add(i)

            exp, mean = exps['parzen'][i]
            prev_tot = mean
            tot = mean - sum([x[1] for x in exp if x[0] in untrustworthy])
            if trust_fn(tot, prev_tot):
                trust['parzen'].add(i)
            else:
                mistrust['parzen'].add(i)

            exp = exps['random'][i]
            if trust_fn_all(exp, untrustworthy):
                trust['random'].add(i)
            else:
                mistrust['random'].add(i)

            exp = exps['greedy'][i]
            if trust_fn_all(exp, untrustworthy):
                trust['greedy'].add(i)
            else:
                mistrust['greedy'].add(i)

        for expl in explainer_names:
            # 'Trust' is the positive class: trusting a prediction that actually flipped is a false positive.
            false_positives = set(trust[expl]).intersection(shouldnt_trust)
            true_positives = set(trust[expl]).difference(shouldnt_trust)
            false_negatives = set(mistrust[expl]).difference(shouldnt_trust)
            true_negatives = set(mistrust[expl]).intersection(shouldnt_trust)

            try:
                prec = len(true_positives) / float(
                    len(true_positives) + len(false_positives))
            except ZeroDivisionError:
                prec = 0
            try:
                rec = float(len(true_positives)) / (len(true_positives) +
                                                    len(false_negatives))
            except ZeroDivisionError:
                rec = 0
            precision[expl].append(prec)
            recall[expl].append(rec)
            f1z = 2 * (prec * rec) / (prec + rec) if (prec and rec) else 0
            f1[expl].append(f1z)

    print('Average number of flipped predictions:', np.mean(flipped_preds_size),
          '+-', np.std(flipped_preds_size))
    print('Precision:')
    for expl in explainer_names:
        print(expl, np.mean(precision[expl]), '+-', np.std(precision[expl]),
              'pvalue', sp.stats.ttest_ind(precision[expl],
                                           precision['LIME'])[1].round(4))
    print()
    print('Recall:')
    for expl in explainer_names:
        print(expl, np.mean(recall[expl]), '+-', np.std(recall[expl]),
              'pvalue', sp.stats.ttest_ind(recall[expl],
                                           recall['LIME'])[1].round(4))
    print()
    print('F1:')
    for expl in explainer_names:
        print(expl, np.mean(f1[expl]), '+-', np.std(f1[expl]),
              'pvalue', sp.stats.ttest_ind(f1[expl], f1['LIME'])[1].round(4))
        res[expl] = '%.2f+-%.2f' % (np.mean(f1[expl]), np.std(f1[expl]))

    # DataFrame.append was removed in pandas 2.x; concatenate a one-row frame instead
    # (assumes pandas is imported as pd in this module).
    df = pd.concat([df, pd.DataFrame([res])], ignore_index=True)
    return df
def main():
    parser = argparse.ArgumentParser(description='Evaluate some explanations')
    parser.add_argument('--dataset',
                        '-d',
                        type=str,
                        required=True,
                        help='dataset name')
    parser.add_argument('--output_folder',
                        '-o',
                        type=str,
                        required=True,
                        help='output folder')
    parser.add_argument('--num_features',
                        '-k',
                        type=int,
                        required=True,
                        help='num features')
    parser.add_argument('--num_rounds',
                        '-r',
                        type=int,
                        required=True,
                        help='num rounds')
    parser.add_argument('--start_id',
                        '-i',
                        type=int,
                        default=0,
                        required=False,
                        help='output start id')
    args = parser.parse_args()
    dataset = args.dataset
    train_data, train_labels, test_data, test_labels, class_names = LoadDataset(
        dataset)
    rho = 25
    kernel = lambda d: np.sqrt(np.exp(-(d**2) / rho**2))
    local = explainers.GeneralizedLocalExplainer(
        kernel,
        explainers.data_labels_distances_mapping_text,
        num_samples=15000,
        return_mean=True,
        verbose=False,
        return_mapped=True)
    # Found through cross validation
    sigmas = {
        'multi_polarity_electronics': {
            'neighbors': 0.75,
            'svm': 10.0,
            'tree': 0.5,
            'logreg': 0.5,
            'random_forest': 0.5,
            'embforest': 0.75
        },
        'multi_polarity_kitchen': {
            'neighbors': 1.0,
            'svm': 6.0,
            'tree': 0.75,
            'logreg': 0.25,
            'random_forest': 6.0,
            'embforest': 1.0
        },
        'multi_polarity_dvd': {
            'neighbors': 0.5,
            'svm': 0.75,
            'tree': 8.0,
            'logreg': 0.75,
            'random_forest': 0.5,
            'embforest': 5.0
        },
        'multi_polarity_books': {
            'neighbors': 0.5,
            'svm': 7.0,
            'tree': 2.0,
            'logreg': 1.0,
            'random_forest': 1.0,
            'embforest': 3.0
        }
    }
    parzen1 = parzen_windows.ParzenWindowClassifier()
    parzen1.sigma = sigmas[dataset]['random_forest']
    parzen2 = parzen_windows.ParzenWindowClassifier()
    parzen2.sigma = sigmas[dataset]['random_forest']
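    # Both Parzen explainers use the 'random_forest' sigma because c1 and c2 below are
    # random forest classifiers.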
    random = explainers.RandomExplainer()

    for Z in range(args.num_rounds):
        exps1 = {}
        exps2 = {}
        explainer_names = ['lime', 'parzen', 'random', 'greedy', 'mutual']
        for expl in explainer_names:
            exps1[expl] = []
            exps2[expl] = []
        print('Round', Z)
        sys.stdout.flush()
        fake_features_z = [([.1, .2], [.1, .1], 10)]  # , ([.2, .1], [.1, .1], 10)
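        # corrupt_dataset appears to inject artificial ('FAKE'-prefixed) features into the
        # training data according to fake_features_z, while the test set stays clean.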
        clean_train, dirty_train, clean_test = corrupt_dataset(
            fake_features_z, train_data, train_labels, test_data, test_labels)
        vectorizer = CountVectorizer(lowercase=False, binary=True)
        dirty_train_vectors = vectorizer.fit_transform(dirty_train)
        clean_train_vectors = vectorizer.transform(clean_train)
        test_vectors = vectorizer.transform(clean_test)
        terms = np.array(list(vectorizer.vocabulary_.keys()))
        indices = np.array(list(vectorizer.vocabulary_.values()))
        inverse_vocabulary = terms[np.argsort(indices)]
        tokenizer = vectorizer.build_tokenizer()
        c1 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5)
        c2 = ensemble.RandomForestClassifier(n_estimators=30, max_depth=5)
        untrustworthy = [
            i for i, x in enumerate(inverse_vocabulary) if x.startswith('FAKE')
        ]
        train_idx, test_idx = tuple(
            cross_validation.ShuffleSplit(dirty_train_vectors.shape[0], 1,
                                          0.2))[0]
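        # Note: train_acc1/2 below are measured on the held-out 20% split of the (dirty)
        # training data, while test_acc1/2 are measured on the clean test set.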
        train_acc1 = train_acc2 = test_acc1 = test_acc2 = 0
        print('Trying to find trees:')
        sys.stdout.flush()
        iteration = 0
        found_tree = True
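        # Keep refitting the two forests until their held-out accuracies match to within
        # 0.001 while their test accuracies differ by at least 0.05; give up after 3000 tries.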
        while (np.abs(train_acc1 - train_acc2) > 0.001 or
               np.abs(test_acc1 - test_acc2) < 0.05):
            iteration += 1
            c1.fit(dirty_train_vectors[train_idx], train_labels[train_idx])
            c2.fit(dirty_train_vectors[train_idx], train_labels[train_idx])
            train_acc1 = accuracy_score(
                train_labels[test_idx],
                c1.predict(dirty_train_vectors[test_idx]))
            train_acc2 = accuracy_score(
                train_labels[test_idx],
                c2.predict(dirty_train_vectors[test_idx]))
            test_acc1 = accuracy_score(test_labels, c1.predict(test_vectors))
            test_acc2 = accuracy_score(test_labels, c2.predict(test_vectors))
            if iteration == 3000:
                found_tree = False
                break
        if not found_tree:
            print('skipping iteration', Z)
            continue
        print('done')
        print('Train acc1:', train_acc1, 'Train acc2:', train_acc2)
        print('Test acc1:', test_acc1, 'Test acc2:', test_acc2)
        sys.stdout.flush()
        predictions = c1.predict(dirty_train_vectors)
        predictions2 = c2.predict(dirty_train_vectors)
        predict_probas = c1.predict_proba(dirty_train_vectors)[:, 1]
        predict_probas2 = c2.predict_proba(dirty_train_vectors)[:, 1]
        cv_preds1 = cross_validation.cross_val_predict(
            c1, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5)
        cv_preds2 = cross_validation.cross_val_predict(
            c2, dirty_train_vectors[train_idx], train_labels[train_idx], cv=5)
        parzen1.fit(dirty_train_vectors[train_idx], cv_preds1)
        parzen2.fit(dirty_train_vectors[train_idx], cv_preds2)
        pp = []
        pp2 = []
        true_labels = []
        iteration = 0
        for i in test_idx:
            if iteration % 50 == 0:
                print(iteration)
                sys.stdout.flush()
            iteration += 1
            pp.append(predict_probas[i])
            pp2.append(predict_probas2[i])
            true_labels.append(train_labels[i])
            exp, mean = local.explain_instance(dirty_train_vectors[i], 1,
                                               c1.predict_proba,
                                               args.num_features)
            exps1['lime'].append((exp, mean))

            exp = parzen1.explain_instance(dirty_train_vectors[i], 1,
                                           c1.predict_proba, args.num_features,
                                           None)
            mean = parzen1.predict_proba(dirty_train_vectors[i])[1]
            exps1['parzen'].append((exp, mean))

            exp = random.explain_instance(dirty_train_vectors[i], 1, None,
                                          args.num_features, None)
            exps1['random'].append(exp)

            exp = explainers.explain_greedy_martens(dirty_train_vectors[i],
                                                    predictions[i],
                                                    c1.predict_proba,
                                                    args.num_features)
            exps1['greedy'].append(exp)

            # Classifier 2
            exp, mean = local.explain_instance(dirty_train_vectors[i], 1,
                                               c2.predict_proba,
                                               args.num_features)
            exps2['lime'].append((exp, mean))

            exp = parzen2.explain_instance(dirty_train_vectors[i], 1,
                                           c2.predict_proba, args.num_features,
                                           None)
            mean = parzen2.predict_proba(dirty_train_vectors[i])[1]
            exps2['parzen'].append((exp, mean))

            exp = random.explain_instance(dirty_train_vectors[i], 1, None,
                                          args.num_features, None)
            exps2['random'].append(exp)

            exp = explainers.explain_greedy_martens(dirty_train_vectors[i],
                                                    predictions2[i],
                                                    c2.predict_proba,
                                                    args.num_features)
            exps2['greedy'].append(exp)

        out = {
            'true_labels': true_labels,
            'untrustworthy': untrustworthy,
            'train_acc1': train_acc1,
            'train_acc2': train_acc2,
            'test_acc1': test_acc1,
            'test_acc2': test_acc2,
            'exps1': exps1,
            'exps2': exps2,
            'predict_probas1': pp,
            'predict_probas2': pp2
        }
        # Pickle files must be opened in binary mode under Python 3.
        out_path = os.path.join(
            args.output_folder, 'comparing_%s_%s_%d.pickle' %
            (dataset, args.num_features, Z + args.start_id))
        with open(out_path, 'wb') as f:
            pickle.dump(out, f)