Example #1
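# Assumed context: these examples are snippets from a larger plotting module.
# Helper functions (read_attack_stats, get_stats, plot_attack_avg_stats_over_ites,
# print_latex) and module-level constants (LABELSIZE, xticks_short, and the
# TARGETNUMS_* / METHODS_* name/color/linestyle dictionaries) are defined
# elsewhere in that module and are not shown here.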
# Imports required by the code below (the original module header is not shown).
import os

import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.pyplot import xticks


def diff_target_num(args):
    """
    Simply plot the avg. attack accuracy (over different targets) based on the number of iterations.
    And plot the avg confidence score (over different targets) of the malicious intended class (i.e., poison class)
    based on the number of iterations.
    Same for avg. attack time, avg. clean acc, and avg. loss
    """
    method, root_path, res_path, retrain_epochs, end2end, target_nums = \
        args.method, args.path, args.res_path, args.epochs, args.end2end, args.target_nums
    plot_root_path = "{}/{}/epochs-{}/{}".format(
        args.res_path, "end2end" if args.end2end else "transfer",
        retrain_epochs, method)

    print(
        "NOTE THAT WE ARE EVALUATING THE CASE THAT THE VICTIMS ARE RETRAINED FOR {} EPOCHS"
        .format(retrain_epochs))
    res = []
    target_ids = None
    for target_num in target_nums:
        r = read_attack_stats(root_path,
                              retrain_epochs=retrain_epochs,
                              target_num=target_num)
        if target_ids is None:
            target_ids = set(r['targets'].keys())
        else:
            target_ids = target_ids.intersection(r['targets'].keys())
        res.append(r)

    target_ids = sorted(list(target_ids))
    print("Target IDs: {}".format(target_ids))

    stats = [get_stats(r, target_ids) for r in res]
    attack_accs = [s[0] for s in stats]
    eval_attack_accs = [s[1] for s in stats]
    clean_acc = [s[2] for s in stats]
    times = [s[3] for s in stats]
    ites = [s[4] for s in stats]
    victims = [s[5] for s in stats]

    # Sanity check: all runs report the same set of attack iterations.
    ites_set = set(ites[0])
    for ites_tmp in ites[1:]:
        ites_set = ites_set.union(set(ites_tmp))
    assert len(ites_set) == len(set(ites[0]))
    ites = ites[0]

    # Sanity check: all runs evaluate the same set of victim networks.
    victims_set = set(victims[0])
    for victims_tmp in victims[1:]:
        victims_set = victims_set.union(set(victims_tmp))
    assert len(victims_set) == len(set(victims[0]))
    victims = victims[0]

    if not os.path.exists(plot_root_path):
        os.makedirs(plot_root_path)

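    # Add a 'meanVictim' column: the per-iteration mean across all victim
    # networks, used for the averaged ("meanVictim") plots below.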
    for attack_accs1 in attack_accs:
        attack_accs1['meanVictim'] = attack_accs1.mean(axis=1)

    for attack_accs1 in eval_attack_accs:
        attack_accs1['meanVictim'] = attack_accs1.mean(axis=1)

    MAX_ACC = 75
    # Plot avg. attack success rate vs. iterations: one figure per victim network, plus the mean over victims.
    for counter, victim in enumerate(victims + ['meanVictim']):
        if victim == 'meanVictim':
            plt.figure(figsize=(5, 2.5), dpi=400)
        else:
            plt.figure(figsize=(6, 3), dpi=400)
        ax = plt.subplot(111)
        if victim == 'meanVictim':
            ax.set_xlabel('Iterations', fontsize=LABELSIZE - 2)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE - 2)
        elif counter == 0:
            ax.set_xlabel('Iterations', fontsize=LABELSIZE)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.4)
        ax.set_ylim([0, MAX_ACC])

        for attack_accs1, target_num in zip(attack_accs, target_nums):
            ax.plot(ites,
                    attack_accs1[victim],
                    label=TARGETNUMS_NAMES[target_num],
                    color=TARGETNUMS_COLORS[target_num],
                    linewidth=1.7,
                    linestyle=TARGETNUMS_LINESTYLES[target_num])
        tick = mtick.FormatStrFormatter('%d%%')
        ax.yaxis.set_major_formatter(tick)

        if 'mean' in victim:
            ax.legend(loc="best",
                      fancybox=True,
                      framealpha=0.5,
                      fontsize=LABELSIZE - 2)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-attack-acc-avg.pdf'.format(plot_root_path, victim),
                    bbox_inches='tight')
        plt.close()

    MAX_ACC = 75
    # Plot avg. attack success rate on the unseen evaluation samples (unseen angles of the cars): one figure per victim network, plus the mean over victims.
    for counter, victim in enumerate(victims + ['meanVictim']):
        if victim == 'meanVictim':
            plt.figure(figsize=(5, 2.5), dpi=400)
        else:
            plt.figure(figsize=(6, 3), dpi=400)
        ax = plt.subplot(111)
        if victim == 'meanVictim':
            ax.set_xlabel('Iterations', fontsize=LABELSIZE - 2)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE - 2)
        elif counter == 0:
            ax.set_xlabel('Iterations', fontsize=LABELSIZE)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.4)
        if victim == 'meanVictim':
            ax.set_ylim([0, 50])
        else:
            ax.set_ylim([0, MAX_ACC])

        for attack_accs1, target_num in zip(eval_attack_accs, target_nums):
            ax.plot(ites,
                    attack_accs1[victim],
                    label=TARGETNUMS_NAMES[target_num],
                    color=TARGETNUMS_COLORS[target_num],
                    linewidth=1.7,
                    linestyle=TARGETNUMS_LINESTYLES[target_num])
        tick = mtick.FormatStrFormatter('%d%%')
        ax.yaxis.set_major_formatter(tick)

        if 'mean' in victim:
            ax.legend(loc="best",
                      fancybox=True,
                      framealpha=0.5,
                      fontsize=LABELSIZE - 2)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-eval-attack-acc-avg.pdf'.format(
            plot_root_path, victim),
                    bbox_inches='tight')
        plt.close()

    # plot avg. clean accuracy
    for victim in victims:
        plt.figure(figsize=(8, 4), dpi=400)
        ax = plt.subplot(111)
        ax.set_xlabel('Iterations', fontsize=LABELSIZE)
        ax.set_ylabel('Avg. Clean Test Accuracy - {}'.format(victim),
                      fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.4)
        # ax.set_ylim([20, 70])
        for clean_acc1, target_num in zip(clean_acc, target_nums):
            ax.plot(ites,
                    clean_acc1[victim],
                    label=TARGETNUMS_NAMES[target_num],
                    color=TARGETNUMS_COLORS[target_num],
                    linewidth=1.7,
                    linestyle=TARGETNUMS_LINESTYLES[target_num])
        if 'mean' in victim:
            ax.legend(loc="best",
                      fancybox=True,
                      framealpha=0.5,
                      fontsize=LABELSIZE - 2)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-clean-acc-avg.pdf'.format(plot_root_path, victim),
                    bbox_inches='tight')
        plt.close()

    # difference from the clean acc before poisoning
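    # clean_acc1.iloc[0] is the first recorded clean accuracy (before poisoning);
    # subtracting it and averaging across victim networks gives the per-iteration
    # change in clean test accuracy.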
    clean_acc_diffs = [(clean_acc1 - clean_acc1.iloc[0]).mean(axis=1)
                       for clean_acc1 in clean_acc]

    plt.figure(figsize=(8, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Avg. Decrease of Clean Test Accuracy', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.4)
    # ax.set_ylim([20, 70])
    for clean_acc_diff1, target_num in zip(clean_acc_diffs, target_nums):
        ax.plot(ites,
                clean_acc_diff1,
                label=TARGETNUMS_NAMES[target_num],
                color=TARGETNUMS_COLORS[target_num],
                linewidth=1.7,
                linestyle=TARGETNUMS_LINESTYLES[target_num])
    ax.legend(loc="best",
              fancybox=True,
              framealpha=0.5,
              fontsize=LABELSIZE - 2)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')

    plt.savefig('{}/meanVictim-clean-acc-avg-diff.pdf'.format(plot_root_path),
                bbox_inches='tight')
    plt.close()

    # plot avg. time
    plt.figure(figsize=(5, 2.5), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Time (minutes)', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.4)
    # ax.set_ylim([20, 70])
    for times1, target_num in zip(times, target_nums):
        ax.plot(ites, [int(t / 60) for t in times1],
                label=TARGETNUMS_NAMES[target_num],
                color=TARGETNUMS_COLORS[target_num],
                linewidth=1.7,
                linestyle=TARGETNUMS_LINESTYLES[target_num])
    ax.legend(loc="best",
              fancybox=True,
              framealpha=0.5,
              fontsize=LABELSIZE - 2)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')

    ax.set_ylim([0, 450])

    plt.savefig('{}/time.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()
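
# Hypothetical helper (not part of the original module): builds the argparse
# namespace that diff_target_num() expects. The flag names are assumptions
# inferred from the attributes the function reads (method, path, res_path,
# epochs, end2end, target_nums).
def _example_diff_target_num_args(argv=None):
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--method', required=True)
    parser.add_argument('--path', required=True)
    parser.add_argument('--res-path', dest='res_path', required=True)
    parser.add_argument('--epochs', type=int, required=True)
    parser.add_argument('--end2end', action='store_true')
    parser.add_argument('--target-nums', dest='target_nums', type=int,
                        nargs='+', required=True)
    return parser.parse_args(argv)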


def compare_with_baseline(paths, methods, plot_root_path, retrain_epochs):
    """
    Compare the given methods: plot the avg. attack success rate (averaged over the
    different targets) against the number of attack iterations, the avg. confidence
    score of the maliciously intended (i.e., poison) class, the avg. clean test
    accuracy (and its change relative to before poisoning), and the avg. attack time.
    """

    print("NOTE THAT WE ARE EVALUATING THE CASE THAT THE VICTIMS ARE RETRAINED FOR {} EPOCHS"
          .format(retrain_epochs))
    res = []
    target_ids = None
    for path in paths:
        r = read_attack_stats(path, retrain_epochs=retrain_epochs)
        if target_ids is None:
            target_ids = set(r['targets'].keys())
        else:
            target_ids = target_ids.intersection(r['targets'].keys())
        res.append(r)

    target_ids = sorted(list(target_ids))
    print("Evaluating {}\n Target IDs: {}".format("\n".join(paths), target_ids))

    stats = [get_stats(r, target_ids) for r in res]
    attack_accs = [s[0] for s in stats]
    scores = [s[1] for s in stats]
    clean_acc = [s[2] for s in stats]
    times = [s[3] for s in stats]
    losses = [s[4] for s in stats]
    ites = [s[5] for s in stats]
    victims = [s[6] for s in stats]
    coeffs = [s[7] for s in stats]

    # Sanity check: all runs report the same set of attack iterations.
    ites_set = set(ites[0])
    for ites_tmp in ites[1:]:
        ites_set = ites_set.union(set(ites_tmp))
    assert len(ites_set) == len(set(ites[0]))
    ites = ites[0]

    # Sanity check: all runs evaluate the same set of victim networks.
    victims_set = set(victims[0])
    for victims_tmp in victims[1:]:
        victims_set = victims_set.union(set(victims_tmp))
    assert len(victims_set) == len(set(victims[0]))
    victims = victims[0]

    if not os.path.exists(plot_root_path):
        os.makedirs(plot_root_path)

    for attack_accs1 in attack_accs:
        attack_accs1['meanVictim'] = attack_accs1.mean(axis=1)

    MAX_ACC = 75
    # Plot avg. attack success rate vs. iterations: one figure per victim network, plus the mean over victims.
    for counter, victim in enumerate(victims + ['meanVictim']):
        if victim == 'meanVictim':
            plt.figure(figsize=(5, 2.5), dpi=400)
        else:
            plt.figure(figsize=(6, 3), dpi=400)
        ax = plt.subplot(111)
        if victim == 'meanVictim':
            ax.set_xlabel('Iterations', fontsize=LABELSIZE - 2)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE - 2)
        elif counter == 0:
            ax.set_xlabel('Iterations', fontsize=LABELSIZE)
            ax.set_ylabel('Attack Success Rate', fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.5)
        ax.set_ylim([0, MAX_ACC])

        for attack_accs1, method1 in zip(attack_accs, methods):
            ax.plot(ites, attack_accs1[victim], label=METHODS_NAMES[method1], color=METHODS_COLORS[method1],
                    linewidth=1.5, linestyle=METHODS_LINESTYLES[method1])
        tick = mtick.FormatStrFormatter('%d%%')
        ax.yaxis.set_major_formatter(tick)
        if 'mean' in victim or counter == 0:
            ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=9)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-attack-acc-avg.pdf'.format(plot_root_path, victim), bbox_inches='tight')
        plt.close()

    print("Attack Acc.")
    for attack_accs1, method1 in zip(attack_accs, methods):
        attack_acc = attack_accs1['meanVictim']
        print(method1, attack_acc)

    # Plot the avg. confidence score of the malicious (i.e., poison) class: one figure per victim network.
    for victim in victims:
        plt.figure(figsize=(8, 4), dpi=400)
        ax = plt.subplot(111)
        ax.set_xlabel('Iterations', fontsize=LABELSIZE)
        ax.set_ylabel('Avg. Probability Score of Malicious (i.e., Poison) Class - {}'.format(victim),
                      fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.5)
        # ax.set_ylim([20, 70])
        for scores1, method1 in zip(scores, methods):
            ax.plot(ites, scores1[victim], label=METHODS_NAMES[method1], color=METHODS_COLORS[method1], linewidth=1.5,
                    linestyle=METHODS_LINESTYLES[method1])
        ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=9)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-attack-score-avg.pdf'.format(plot_root_path, victim), bbox_inches='tight')
        plt.close()

    # plot avg. clean accuracy
    for victim in victims:
        plt.figure(figsize=(8, 4), dpi=400)
        ax = plt.subplot(111)
        ax.set_xlabel('Iterations', fontsize=LABELSIZE)
        ax.set_ylabel('Avg. Clean Test Accuracy - {}'.format(victim), fontsize=LABELSIZE)
        ax.grid(color='black', linestyle='dotted', linewidth=0.5)
        # ax.set_ylim([20, 70])
        for clean_acc1, method1 in zip(clean_acc, methods):
            ax.plot(ites, clean_acc1[victim], label=METHODS_NAMES[method1], color=METHODS_COLORS[method1],
                    linewidth=1.5,
                    linestyle=METHODS_LINESTYLES[method1])
        ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=9)
        if xticks_short:
            locs, _ = xticks()
            xticks(locs[::5], ites[::5], rotation='vertical')

        plt.savefig('{}/{}-clean-acc-avg.pdf'.format(plot_root_path, victim), bbox_inches='tight')
        plt.close()

    # difference from the clean acc before poisoning
    clean_acc_diffs = [(clean_acc1 - clean_acc1.iloc[0]).mean(axis=1) for clean_acc1 in clean_acc]

    plt.figure(figsize=(8, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Avg. Change in Clean Test Accuracy', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    # ax.set_ylim([20, 70])
    for clean_acc_diff1, method1 in zip(clean_acc_diffs, methods):
        ax.plot(ites, clean_acc_diff1, label=METHODS_NAMES[method1], color=METHODS_COLORS[method1], linewidth=1.5,
                linestyle=METHODS_LINESTYLES[method1])
    ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=9)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')

    plt.savefig('{}/meanVictim-clean-acc-avg-diff.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()

    # plot avg. time
    plt.figure(figsize=(8, 4), dpi=400)
    ax = plt.subplot(111)
    ax.set_xlabel('Iterations', fontsize=LABELSIZE)
    ax.set_ylabel('Time (minutes)', fontsize=LABELSIZE)
    ax.grid(color='black', linestyle='dotted', linewidth=0.5)
    # ax.set_ylim([20, 70])
    for times1, method1 in zip(times, methods):
        ax.plot(ites, [int(t / 60) for t in times1], label=METHODS_NAMES[method1], color=METHODS_COLORS[method1],
                linewidth=1.5, linestyle=METHODS_LINESTYLES[method1])
    ax.legend(loc="upper left", fancybox=True, framealpha=0.5, fontsize=9)
    if xticks_short:
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')

    plt.savefig('{}/time.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()


if __name__ == '__main__':
    import sys
    epochs = sys.argv[1]
    paths = sys.argv[2:]
    # assert 'convex' in paths[0]
    # assert len(paths) <= 1 or ('mean' in paths[1] and 'mean-' not in paths[1])
    # assert len(paths) <= 2 or 'mean-' in paths[2]

    print(
        "NOTE THAT WE ARE EVALUATING THE CASE THAT THE VICTIMS ARE RETRAINED FOR {} EPOCHS"
        .format(epochs))
    res = []
    target_ids = None
    for path in paths:
        r = read_attack_stats(path, retrain_epochs=epochs)
        if target_ids is None:
            target_ids = set(r['targets'].keys())
        else:
            target_ids = target_ids.intersection(r['targets'].keys())
        res.append(r)

    target_ids = sorted(list(target_ids))
    print("Evaluating {}\n Target IDs: {}".format("\n".join(paths),
                                                  target_ids))

    for path, r in zip(paths, res):
        plot_attack_avg_stats_over_ites(
            poisons_root_path=path,
            res=r,
            target_ids=target_ids,
        )
Example #4
    IPython.embed()


if __name__ == '__main__':
    import sys
    path = sys.argv[1]
    method = sys.argv[2]
    assert method in path

    # (epochs, sgd, dropout)
    settings = [(epoch, False, 0.0) for epoch in range(10, 101, 10)] + \
               [(epoch, True, 0.0) for epoch in range(10, 101, 10)] + \
               [(80, False, 0.2), (100, False, 0.2)] + [(180, False, 0.2), (200, False, 0.2)]
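    # The sweep covers 10-100 retraining epochs with sgd=False and with sgd=True
    # (both without dropout), plus runs at 80/100/180/200 epochs with dropout 0.2.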
    res = []
    target_ids = None
    for epochs, sgd, dropout in settings:
        r = read_attack_stats(path,
                              retrain_epochs=epochs,
                              sgd=sgd,
                              dropout=dropout)
        if target_ids is None:
            target_ids = set(r['targets'].keys())
        else:
            target_ids = target_ids.intersection(r['targets'].keys())
        res.append(r)

    target_ids = sorted(list(target_ids))
    print("Evaluating {}\n Target IDs: {}".format(path, target_ids))

    print_latex(res, ite='4000')
Example #5
        locs, _ = xticks()
        xticks(locs[::5], ites[::5], rotation='vertical')
    print("Avg. time after {}: {}".format(ites[-1], int(times[-1]/60)))
    plt.xticks(rotation=90)
    plt.savefig('{}/time.pdf'.format(plot_root_path), bbox_inches='tight')
    plt.close()


if __name__ == '__main__':
    import sys
    convex_path = sys.argv[1]
    mean_path = sys.argv[2]
    epochs = sys.argv[3]
    assert 'convex' in convex_path
    assert 'mean' in mean_path

    print("NOTE THAT WE ARE EVALUATING THE CASE THAT THE VICTIMS ARE RETRAINED FOR {} EPOCHS"
          .format(epochs))
    convex_res = read_attack_stats(convex_path, retrain_epochs=epochs)
    convex_target_ids = set(convex_res['targets'].keys())
    mean_res = read_attack_stats(mean_path, retrain_epochs=epochs)
    mean_target_ids = set(mean_res['targets'].keys())

    target_ids = convex_target_ids.intersection(mean_target_ids)
    target_ids = sorted(list(target_ids))
    print("Evaluating {} and {}. Target IDs: {}".format(convex_path, mean_path, target_ids))

    plot_attack_avg_stats_over_ites(poisons_root_path=convex_path, res=convex_res, target_ids=target_ids)
    plot_attack_avg_stats_over_ites(poisons_root_path=mean_path, res=mean_res, target_ids=target_ids)
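
# Hypothetical invocation sketch (script name and paths are placeholders):
# sys.argv[1] must contain 'convex', sys.argv[2] must contain 'mean', and
# sys.argv[3] is the number of epochs the victims were retrained for:
#   python <this_script>.py <convex_results_dir> <mean_results_dir> <epochs>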