Code Example #1
def basic_conv(dataset, num_params, softmax_test, iterations=3000):
    batch_size = 50

    # Global
    softmax_model = softmax_model_obj.SoftMaxModel(dataset, numClasses)

    print("Start training")
    acc_in_iterations = []  # record results
    delta_in_iterations = []
    weights = np.random.rand(num_params) / 100.0

    # allocated for logging but never written in this snippet
    train_progress = np.zeros(iterations)
    test_progress = np.zeros(iterations)

    for i in range(iterations):
        deltas, _ = softmax_model.privateFun(weights, batch_size=batch_size)
        weights = weights + deltas
        if i % 10 == 0:
            # Xtest, ytest, from_class, to_class, numClasses and numFeatures
            # are module-level globals in this project
            acc_in_iterations.append(
                poisoning_compare.eval(Xtest, ytest, weights, int(from_class), int(to_class),
                                       numClasses, numFeatures, verbose=False))
            if i % 100 == 0:
                print("Train error: %.10f" % softmax_test.train_error(weights))

    print("Done iterations!")
    print("Train error: %d" % softmax_test.train_error(weights))
    print("Test error: %d" % softmax_test.test_error(weights))
    return weights, acc_in_iterations
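
A minimal driver for basic_conv might look as follows. This is a sketch: the paths, the class/feature counts, and the softmax_test fixture are placeholders, and it assumes SoftMaxModel exposes the train_error/test_error methods used above.

import numpy as np

# Hypothetical setup -- every value here is a placeholder, not from the project
numClasses, numFeatures = 10, 784     # e.g. MNIST
numParams = numClasses * numFeatures  # softmax: one weight per class-feature pair
softmax_test = softmax_model_obj.SoftMaxModel("mnist_test", numClasses)

weights, acc_history = basic_conv("mnist_train", numParams, softmax_test,
                                  iterations=3000)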
Code Example #2
File: ML_pytorch.py  Project: whqxbs/FoolsGold
            # (fragment: the enclosing loop parses attack specs of the form
            # "<sybils>_<from>_<to>"; cf. the commented-out block below)
            to_class = attack_delim[2]
            for i in range(int(sybil_set_size)):
                models.append(dataPath + "_bad_" + from_class + "_" + to_class)

        weights = non_iid(models,
                          numClasses,
                          numParams,
                          iterations,
                          ideal_attack=False)
        # weights = basic_conv(dataset, numParams, iterations=3000)

        # for attack in argv[2:]:
        #     attack_delim = attack.split("_")
        #     from_class = attack_delim[1]
        #     to_class = attack_delim[2]
        #     score = poisoning_compare.eval(Xtest, ytest, weights, int(from_class), int(to_class), numClasses, numFeatures)
        #     eval_data[run] = score

    # Sandbox: difference between ideal bad model and global model
    compare = False
    if compare:
        # basic_conv returns (weights, acc_in_iterations); keep only the weights
        bad_weights, _ = basic_conv(
            dataPath + "_bad_ideal_" + from_class + "_" + to_class, numParams,
            softmax_test)
        poisoning_compare.eval(Xtest, ytest, bad_weights, int(from_class),
                               int(to_class), numClasses, numFeatures)

        diff = np.reshape(bad_weights - weights, (numClasses, numFeatures))
        abs_diff = np.reshape(np.abs(bad_weights - weights),
                              (numClasses, numFeatures))
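
To read those difference matrices row by class, one might do something like the following (illustrative only, not part of the original file):

# Locate, for each class, the feature where the ideal bad model
# diverges most from the aggregated global model (illustration only)
top_feature_per_class = np.argmax(abs_diff, axis=1)
for c, f in enumerate(top_feature_per_class):
    print("class %d: largest gap at feature %d (delta=%.4f)" % (c, f, diff[c, f]))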
Code Example #3
def no_attack(model_names, numClasses, numParams, softmax_test, iterations=3000):
    # SGD batch size
    batch_size = 50

    # The number of local steps each client takes
    fed_avg_size = 1

    list_of_models = []

    for dataset in model_names[: numClasses]:
        list_of_models.append(softmax_model_obj.SoftMaxModel(dataset, numClasses))

    numClients = len(list_of_models)
    model_aggregator.init(numClients, numParams, numClasses)

    print("\nStart training across " + str(numClients) + " clients with no attack.")

    weights = np.random.rand(numParams) / 100.0
    lr = np.ones(numClients, )
    acc_in_iterations = []
    delta_each_client = []
    train_progress = []
    norm_progress = []
    loss_progress = []

    # The number of previous iterations of updates FoolsGold can look back on;
    # 0 disables the history buffer entirely
    memory_size = 0
    delta_memory = np.zeros((numClients, numParams, memory_size))

    summed_deltas = np.zeros((numClients, numParams))

    for i in range(iterations):

        delta = np.zeros((numClients, numParams))
        losses = np.zeros(numClients)

        ##################################
        # Use significant features filter or not
        ##################################

        # Significant features filter, the top k biggest weights
        # topk = int(numParams / 2)
        # sig_features_idx = np.argpartition(weights, -topk)[-topk:]
        sig_features_idx = np.arange(numParams)

        for k in range(len(list_of_models)):

            delta[k, :], losses[k] = list_of_models[k].privateFun(weights,
                                                                  batch_size=batch_size,
                                                                  num_iterations=fed_avg_size, iter_num=i)

            # normalize delta
            if np.linalg.norm(delta[k, :]) > 1:
                delta[k, :] = delta[k, :] / np.linalg.norm(delta[k, :])

        ##################################
        # Use FoolsGold or something else
        ##################################

        # Use Foolsgold (can optionally clip gradients via Krum)
        # this_delta = model_aggregator.foolsgold(delta, summed_deltas,
        #                                              sig_features_idx, i, weights, clip=1)
        this_delta = np.dot(delta.T, lr)
        # delta_each_client.append(np.hstack((delta[:, 7000], this_delta[7000])))
        # Krum
        # this_delta = model_aggregator.krum(delta, clip=1)

        # Simple Functions
        # this_delta = model_aggregator.average(delta)
        # this_delta = model_aggregator.median(delta)
        # this_delta = model_aggregator.trimmed_mean(delta, 0.2)

        weights = weights + this_delta

        if i % 10 == 0:
            norm_progress.append(np.mean(np.linalg.norm(delta, axis=1)))
            test_error = softmax_test.test_error(weights)
            train_progress.append(test_error)  # note: records the test error despite the name
            acc_in_iterations.append(
                [test_error] + list(poisoning_compare.eval(Xtest, ytest, weights, int(from_class), int(to_class),
                                                           numClasses, numFeatures, verbose=False)))

            # if i % 100 == 0:
            #     print("Validation error: %.5f" % test_error)
    # pd.DataFrame(columns=['client{}'.format(i) for i in range(15)] + ['combined'], data=delta_each_client).to_csv(
    #     'delta.csv')
    test_error = softmax_test.test_error(weights)
    acc_in_iterations.append(
        [test_error] + list(poisoning_compare.eval(Xtest, ytest, weights, int(from_class), int(to_class),
                                                   numClasses, numFeatures, verbose=True)))
    column = ['iteration', 'Test error', 'Accuracy overall', 'Accuracy on other digits',
              'Target Accuracy on source label',
              'Target Accuracy on target label', 'Target Attack Rate']
    acc_in_iterations = np.insert(acc_in_iterations, 0, values=np.arange(0, iterations + 1, 10), axis=1)
    res = pd.DataFrame(columns=column, data=acc_in_iterations)
    res.to_csv('_'.join(argv[:2]) + '_no_attack.csv')
    print("Done iterations!")
    print("Train error: {}".format(softmax_test.train_error(weights)))
    print("Test error: {}".format(softmax_test.test_error(weights)))
    return weights, norm_progress, train_progress, acc_in_iterations
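
The commented-out model_aggregator.foolsgold call above is the interesting branch. Per the FoolsGold paper, its core idea is to down-weight clients whose accumulated updates are suspiciously similar to one another. A minimal sketch of that re-weighting, assuming sklearn is available (an illustration of the idea, not the project's actual model_aggregator code):

import numpy as np
import sklearn.metrics.pairwise as smp

def foolsgold_lr_sketch(summed_deltas):
    n = len(summed_deltas)
    # pairwise cosine similarity of accumulated updates, self-similarity zeroed
    cs = smp.cosine_similarity(summed_deltas) - np.eye(n)
    maxcs = np.max(cs, axis=1)
    # "pardoning": honest clients that merely resemble a sybil are re-scaled
    for u in range(n):
        for v in range(n):
            if maxcs[u] < maxcs[v]:
                cs[u][v] *= maxcs[u] / maxcs[v]
    lr = 1 - np.max(cs, axis=1)
    lr = lr / np.max(lr)  # rescale to [0, 1]
    # logit transform to sharpen the weighting
    lr[lr == 1] = 0.99
    lr = np.log(lr / (1 - lr)) + 0.5
    lr[np.isinf(lr) | (lr < 0)] = 0  # -inf comes from lr == 0
    lr[lr > 1] = 1
    return lr

The result would then play the role of lr in the weighted sum this_delta = np.dot(delta.T, lr).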
Code Example #4
File: ML_noisyPoisoners.py  Project: whqxbs/FoolsGold
def non_iid(model_names,
            numClasses,
            numParams,
            softmax_test,
            topk_prop,
            iterations=3000,
            numSybils=2,
            ideal_attack=False,
            poisoner_indices=[],
            solution=None):
    batch_size = 50
    topk = int(numParams / 10)

    list_of_models = []

    for dataset in model_names:
        list_of_models.append(
            softmax_model_obj.SoftMaxModel(dataset, numClasses))

    # Include the model that sends the ideal vector on each iteration
    if ideal_attack:
        list_of_models.append(
            softmax_model_obj.SoftMaxModelEvil(dataPath + "_bad_ideal_4_9",
                                               numClasses))

    numClients = len(list_of_models)
    model_aggregator.init(numClients, numParams, numClasses)

    print("\nStart training across " + str(numClients) +
          " clients with solution " + str(solution) + '.')

    weights = np.random.rand(numParams) / 100.0
    lr = np.ones(numClients, )
    acc_in_iterations = []
    delta_all = []
    train_progress = []
    norm_progress = []
    loss_progress = []

    summed_deltas = np.zeros((numClients, numParams))

    for i in range(iterations):

        delta = np.zeros((numClients, numParams))

        # Significant features filter
        # sig_features_idx = np.argpartition(weights, -topk)[-topk:]
        sig_features_idx = np.arange(numParams)

        for k in range(len(list_of_models)):
            delta[k, :], _ = list_of_models[k].privateFun(weights, batch_size)

            # normalize delta
            if np.linalg.norm(delta[k, :]) > 1:
                delta[k, :] = delta[k, :] / np.linalg.norm(delta[k, :])

        # Add adversarial noise
        noisevec = rescale(np.random.rand(numParams), np.min(delta),
                           np.max(delta))
        # the two colluding poisoners inject equal and opposite noise
        delta[poisoner_indices[0], :] += noisevec
        delta[poisoner_indices[1], :] -= noisevec

        # Track the total vector from each individual client
        summed_deltas = summed_deltas + delta
        if solution:
            if solution == 'fg':
                # Use Foolsgold
                this_delta = model_aggregator.foolsgold(delta,
                                                        summed_deltas,
                                                        sig_features_idx,
                                                        i,
                                                        weights,
                                                        lr,
                                                        topk_prop,
                                                        importance=False,
                                                        importanceHard=True)
            if solution == 'ours':
                this_delta, lr = model_aggregator.foolsgold2(
                    delta,
                    summed_deltas,
                    sig_features_idx,
                    i,
                    weights,
                    lr,
                    topk_prop,
                    importance=False,
                    importanceHard=True)
            if solution == 'krum':
                # Krum
                this_delta = model_aggregator.krum(delta, clip=1)
            if solution == 'average':
                this_delta = model_aggregator.average(delta)
            if solution == 'median':
                this_delta = model_aggregator.median(delta)
            if solution == 'trimmed_mean':
                this_delta = model_aggregator.trimmed_mean(delta, 0.2)
        else:
            this_delta = np.dot(delta.T, lr)

        weights = weights + this_delta

        if i % 10 == 0:
            delta_index = heapq.nlargest(20, range(len(this_delta)),
                                         this_delta.take)
            delta_each_client = []
            for idx in delta_index:
                delta_each_client.append(
                    np.hstack(([i, idx], delta[:, idx], this_delta[idx])))
            delta_all.append(delta_each_client)
            norm_progress.append(np.mean(np.linalg.norm(delta, axis=1)))
            test_error = softmax_test.test_error(weights)
            train_progress.append(test_error)  # note: records the test error despite the name
            acc_in_iterations.append([test_error] + list(
                poisoning_compare.eval(Xtest,
                                       ytest,
                                       weights,
                                       int(from_class),
                                       int(to_class),
                                       numClasses,
                                       numFeatures,
                                       verbose=False)))

            # if i % 100 == 0:
            #     print("Validation error: %.5f" % test_error)
    column = (['iteration', 'deltaIndex'] +
              ['client{}'.format(i) for i in range(numClients)] + ['combined'])
    pd.DataFrame(columns=column, data=np.reshape(
        delta_all, (-1, len(column)))).to_csv('_'.join(argv) + '_' +
                                              str(solution) + '_delta.csv')
    test_error = softmax_test.test_error(weights)
    acc_in_iterations.append([test_error] + list(
        poisoning_compare.eval(Xtest,
                               ytest,
                               weights,
                               int(from_class),
                               int(to_class),
                               numClasses,
                               numFeatures,
                               verbose=True)))
    # column = ['iteration', 'Test error', 'Accuracy overall', 'Accuracy on other digits',
    #           'Target Accuracy on source label',
    #           'Target Accuracy on target label', 'Target Attack Rate']
    # acc_in_iterations = np.insert(acc_in_iterations, 0, values=np.arange(0, iterations + 1, 10), axis=1)
    # res = pd.DataFrame(columns=column, data=acc_in_iterations)
    # res.to_csv('_'.join(argv) + '_' + str(solution) + '.csv')
    print("Done iterations!")
    print("Train error: {}".format(softmax_test.train_error(weights)))
    print("Test error: {}".format(softmax_test.test_error(weights)))
    # pdb.set_trace()
    # import sklearn.metrics.pairwise as smp
    # cs = smp.cosine_similarity(summed_deltas)
    return weights
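
The rescale helper used for the noise vector is not included in this snippet. Judging from the call site, a plausible implementation (an assumption, not the project's code) linearly maps a vector onto the [lo, hi] range of the observed deltas:

def rescale(vec, lo, hi):
    # assumed implementation -- the project's rescale is not shown here:
    # linearly map vec from its own [min, max] range onto [lo, hi]
    vmin, vmax = np.min(vec), np.max(vec)
    return lo + (vec - vmin) * (hi - lo) / (vmax - vmin)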