Exemplo n.º 1
0
def eval_setup(global_weights):
    """Build an evaluation graph for the configured dataset and return its handles.

    Returns a tuple ``(x, y, sess, prediction, loss)``: the input/label
    placeholders, a live session with ``global_weights`` loaded into the
    model, and the softmax-prediction and mean cross-entropy tensors.
    """
    args = gv.args

    # MNIST models are evaluated with Keras in inference mode.
    if 'MNIST' in args.dataset:
        K.set_learning_phase(0)

    global_weights_np = global_weights

    # Choose the architecture that matches the dataset under evaluation.
    if 'MNIST' in args.dataset:
        global_model = model_mnist(type=args.model_num)
    elif args.dataset == 'CIFAR-10':
        global_model = cifar_10_model()
    elif args.dataset == 'census':
        global_model = census_model_1()

    # Census inputs are flat feature vectors; image datasets are HxWxC tensors.
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
    else:
        x = tf.placeholder(
            shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS, gv.NUM_CHANNELS),
            dtype=tf.float32)
    y = tf.placeholder(dtype=tf.int64)

    logits = global_model(x)
    prediction = tf.nn.softmax(logits)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    # Multi-agent runs share the GPU via the globally configured gpu_options.
    if args.k > 1:
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gv.gpu_options))
    elif args.k == 1:
        sess = tf.Session()

    K.set_session(sess)
    sess.run(tf.global_variables_initializer())

    global_model.set_weights(global_weights_np)

    return x, y, sess, prediction, loss
Exemplo n.º 2
0
def master():
    """Create the initial global model and write its round-0 weights to disk."""
    K.set_learning_phase(1)

    args = gv.args
    print('Initializing master model')

    # Let the session grow GPU memory on demand instead of pre-allocating it.
    sess_config = tf.ConfigProto(gpu_options=gv.gpu_options)
    sess_config.gpu_options.allow_growth = True
    session = tf.Session(config=sess_config)
    K.set_session(session)
    session.run(tf.global_variables_initializer())

    # Build the architecture matching the configured dataset.
    if 'MNIST' in args.dataset:
        global_model = model_mnist(type=args.model_num)
    elif args.dataset == 'census':
        global_model = census_model_1()
    global_model.summary()

    # Round-0 weights that every agent will start training from.
    np.save(gv.dir_name + 'global_weights_t0.npy', global_model.get_weights())

    return
Exemplo n.º 3
0
def main(model_name, model_type):
    """Train an MNIST model of the given type and save it under ``model_name``.

    Trains on the MNIST training split (reserving the first 150 test samples
    as a substitute set for the adversary), saves the trained model via
    ``save_model`` plus its architecture as ``<model_name>.json``, and prints
    the test error on the remaining held-out samples.
    """
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()

    # NOTE(review): DEFINE_bool is being given an integer epoch count; this
    # looks like it should be DEFINE_integer — kept as-is to avoid changing
    # flag semantics elsewhere. TODO confirm against FLAGS consumers.
    flags.DEFINE_bool('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    # Initialize substitute training set reserved for adversary
    X_sub = X_test[:150]
    Y_sub = np.argmax(Y_test[:150], axis=1)

    # Redefine test set as remaining samples unavailable to adversaries
    X_test = X_test[150:]
    Y_test = Y_test[150:]

    data_gen = data_gen_mnist(X_train)

    x = K.placeholder(
        (None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))

    y = K.placeholder(shape=(None, FLAGS.NUM_CLASSES))

    model = model_mnist(type=model_type)
    prediction = model(x)

    train_params = {
        'nb_epochs': args.epochs,
        'batch_size': FLAGS.BATCH_SIZE,
        'learning_rate': 0.001
    }

    def evaluate_1():
        # Periodic training callback: accuracy on the clean held-out test set.
        eval_params = {'batch_size': FLAGS.BATCH_SIZE}
        test_accuracy = model_eval(K.get_session(),
                                   x,
                                   y,
                                   prediction,
                                   X_test,
                                   Y_test,
                                   args=eval_params)
        print('Test accuracy of blackbox on legitimate test '
              'examples: {:.3f}'.format(test_accuracy))

    model_train(K.get_session(),
                x,
                y,
                model,
                X_train,
                Y_train,
                data_gen,
                evaluate=evaluate_1,
                args=train_params)

    save_model(model, model_name)
    json_string = model.to_json()
    # BUG FIX: 'wr' is not a valid file mode in Python 3 (raises ValueError);
    # 'w' is the correct mode for writing the JSON architecture file.
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)
Exemplo n.º 4
0
# Script-level setup: load a saved snapshot of the global weights and build
# an evaluation graph for it (model, placeholders, session).
args = gv.args

# Global weights snapshot from federated round 8 (round index hard-coded).
weights_np = np.load(gv.dir_name + 'global_weights_t%s.npy' % 8)

X_train, Y_train, X_test, Y_test, Y_test_uncat = data_setup()

# Toggle loading the malicious-objective data alongside benign evaluation.
mal_analyse = True

if mal_analyse:
    mal_data_X, mal_data_Y, true_labels = mal_data_setup(X_test, Y_test, Y_test_uncat, gen_flag=False)

# Class labels rendered as strings, e.g. for plot/report annotations.
label_to_class_name = [str(i) for i in range(gv.NUM_CLASSES)]

# Build the architecture matching the configured dataset.
if 'MNIST' in args.dataset:
    model = model_mnist(type=args.model_num)
elif args.dataset == 'CIFAR-10':
    model = cifar_10_model()

# Image input placeholder and integer (uncategorical) label placeholder.
x = tf.placeholder(shape=(None,
                          gv.IMAGE_ROWS,
                          gv.IMAGE_COLS,
                          gv.NUM_CHANNELS), dtype=tf.float32)
y = tf.placeholder(dtype=tf.int64)

logits = model(x)
prediction = tf.nn.softmax(logits)

sess = tf.Session()

K.set_session(sess)
Exemplo n.º 5
0
def agent(i, X_shard, Y_shard, t, gpu_id, return_dict, X_test, Y_test, lr=None):
    """Benign federated agent: train locally on a data shard for round ``t``.

    Loads the round-``t`` global weights, trains for a number of steps on
    ``X_shard``/``Y_shard``, evaluates the result, and reports the weight
    delta both via ``return_dict[str(i)]`` and as ``ben_delta_<i>_t<t>.npy``.

    Args:
        i: agent index; also the key used in ``return_dict``.
        X_shard, Y_shard: this agent's training shard (Y one-hot encoded,
            converted to integer labels per batch below).
        t: current global round; selects the weights file to load.
        gpu_id: GPU to expose via CUDA_VISIBLE_DEVICES.
        return_dict: shared dict used to pass the delta back to the caller
            (presumably a multiprocessing manager dict — TODO confirm).
        X_test, Y_test: held-out data for the final evaluation.
        lr: learning rate; defaults to ``args.eta`` when None.
    """
    K.set_learning_phase(1)

    args = gv.args
    if lr is None:
        lr = args.eta
    # print('Agent %s on GPU %s' % (i,gpu_id))
    # set environment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t)
    shard_size = len(X_shard)

    # if i == 0:
    #     # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    #     eval_success, eval_loss = eval_minimal(X_test,Y_test,shared_weights)
    #     print('Global success at time {}: {}, loss {}'.format(t,eval_success,eval_loss))

    # Fixed step count if given, otherwise E epochs over the shard in batches of B.
    if args.steps is not None:
        num_steps = args.steps
    else:
        num_steps = int(args.E) * shard_size / args.B

    # with tf.device('/gpu:'+str(gpu_id)):
    # Census inputs are flat feature vectors; image datasets are HxWxC tensors.
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None,
                              gv.DATA_DIM), dtype=tf.float32)
        # y = tf.placeholder(dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None,
                                  gv.IMAGE_ROWS,
                                  gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'census':
        agent_model = census_model_1()

    logits = agent_model(x)

    # Both branches currently build the same sparse softmax cross-entropy loss;
    # the census branch kept its own slot for the (commented) sigmoid variant.
    if args.dataset == 'census':
        # loss = tf.nn.sigmoid_cross_entropy_with_logits(
        #     labels=y, logits=logits)
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits))
    else:
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits))
    prediction = tf.nn.softmax(logits)  

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate=lr).minimize(loss)
    elif args.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(loss)

    # Multi-agent runs share the GPU via the globally configured gpu_options.
    if args.k > 1:
        config = tf.ConfigProto(gpu_options=gv.gpu_options)
        # config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
    elif args.k == 1:
        sess = tf.Session()
    K.set_session(sess)
    sess.run(tf.global_variables_initializer())

    agent_model.set_weights(shared_weights)
    # print('loaded shared weights')

    # With a fixed step budget, stagger the starting offset per round so
    # successive rounds sweep different parts of the shard.
    start_offset = 0
    if args.steps is not None:
        start_offset = (t*args.B*args.steps) % (shard_size - args.B)

    for step in range(num_steps):
        offset = (start_offset + step * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset: (offset + args.B)]
        Y_batch = Y_shard[offset: (offset + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        _, loss_val = sess.run([optimizer,loss], feed_dict={x: X_batch, y: Y_batch_uncat})
        if step % 1000 == 0:
            print ('Agent %s, Step %s, Loss %s, offset %s' % (i,step,loss_val, offset))
            # local_weights = agent_model.get_weights()
            # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
            # print('Agent {}, Step {}: success {}, loss {}'.format(i,step,eval_success,eval_loss))

    local_weights = agent_model.get_weights()
    # NOTE(review): get_weights() returns a list; this subtraction relies on
    # ``shared_weights`` being a numpy object array so '-' broadcasts
    # per-layer — TODO confirm with the np.save/np.load round-trip above.
    local_delta = local_weights - shared_weights
    
    # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    eval_success, eval_loss = eval_minimal(X_test,Y_test,local_weights)


    print('Agent {}: success {}, loss {}'.format(i,eval_success,eval_loss))

    return_dict[str(i)] = np.array(local_delta)

    np.save(gv.dir_name + 'ben_delta_%s_t%s.npy' % (i,t), local_delta)

    return
Exemplo n.º 6
0
def mal_agent(X_shard, Y_shard, mal_data_X, mal_data_Y, t, gpu_id, return_dict,
              mal_visible, X_test, Y_test):
    """Malicious federated agent: craft a weight delta for round ``t``.

    Depending on the configured attack strategy/objective, either runs one of
    the malicious update algorithms (``mal_all_algs`` / ``mal_single_algs``)
    or falls back to benign training (delay/hold-off). Reports the delta via
    ``return_dict[str(gv.mal_agent_index)]`` and ``mal_delta_t<t>.npy``, and
    logs evaluation results on both benign and malicious objectives.

    Args:
        X_shard, Y_shard: the agent's benign training shard.
        mal_data_X, mal_data_Y: the malicious target inputs and labels.
        t: current global round; selects the weights file to load.
        gpu_id: GPU to expose via CUDA_VISIBLE_DEVICES.
        return_dict: shared dict for passing results back to the caller.
        mal_visible: passed through to ``mal_single_algs`` (presumably the
            rounds where the malicious update was visible — TODO confirm).
        X_test, Y_test: held-out data for benign-accuracy evaluation.
    """
    args = gv.args

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t)

    # Hold-off: skip attacking this round if the single-target objective is
    # already achieved with high confidence (> 0.8) by the global model.
    holdoff_flag = 0
    if 'holdoff' in args.mal_strat:
        print('Checking holdoff')
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, shared_weights)
            if target_conf > 0.8:
                print('Holding off')
                holdoff_flag = 1

    # tf.reset_default_graph()

    K.set_learning_phase(1)

    print('Malicious Agent on GPU %s' % gpu_id)
    # set enviornment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    # Census inputs are flat feature vectors; image datasets are HxWxC tensors.
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS),
                           dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar_10_model()
    elif args.dataset == 'census':
        agent_model = census_model_1()

    logits = agent_model(x)
    prediction = tf.nn.softmax(logits)
    eval_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    config = tf.ConfigProto(gpu_options=gv.gpu_options)
    # config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    # Attack only after the configured delay and when not holding off;
    # otherwise produce a benign delta for this round.
    if t >= args.mal_delay and holdoff_flag == 0:
        if args.mal_obj == 'all':
            final_delta = mal_all_algs(x, y, logits, agent_model,
                                       shared_weights, sess, mal_data_X,
                                       mal_data_Y, t)
        elif args.mal_obj == 'single' or 'multiple' in args.mal_obj:
            # NOTE(review): penul_delta is only bound on this path; the
            # 'auto'/'multiple' branch below would raise UnboundLocalError if
            # reached via mal_obj == 'all' or the delay path — TODO confirm
            # the config space excludes those combinations.
            final_delta, penul_delta = mal_single_algs(
                x, y, logits, agent_model, shared_weights, sess, mal_data_X,
                mal_data_Y, t, mal_visible, X_shard, Y_shard)
    elif t < args.mal_delay or holdoff_flag == 1:
        print('Delay/Hold-off')
        final_delta, _ = benign_train(x, y, agent_model, logits, X_shard,
                                      Y_shard, sess, shared_weights)

    final_weights = shared_weights + final_delta
    agent_model.set_weights(final_weights)

    # Evaluate the malicious objective on the final (attacked) weights.
    print('---Eval at mal agent---')
    if 'single' in args.mal_obj:
        target, target_conf, actual, actual_conf = mal_eval_single(
            mal_data_X, mal_data_Y, final_weights)
        print(
            'Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
            % (target, target_conf, t, actual, actual_conf))
    elif 'multiple' in args.mal_obj:
        suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                            final_weights)
        print('%s of %s targets achieved' % (suc_count_local, args.mal_num))

    # Benign-accuracy evaluation; note this rebinds eval_loss from the tensor
    # defined above to a scalar result.
    eval_success, eval_loss = eval_minimal(X_test, Y_test, final_weights)
    return_dict['mal_success'] = eval_success
    print('Malicious Agent: success {}, loss {}'.format(
        eval_success, eval_loss))
    write_dict = {}
    # just to maintain ordering
    write_dict['t'] = t + 1
    write_dict['eval_success'] = eval_success
    write_dict['eval_loss'] = eval_loss
    file_write(write_dict, purpose='mal_eval_loss')

    return_dict[str(gv.mal_agent_index)] = np.array(final_delta)
    np.save(gv.dir_name + 'mal_delta_t%s.npy' % t, final_delta)

    # Also report results for the penultimate delta (pre-boosting snapshot —
    # presumably; verify against mal_single_algs).
    if 'auto' in args.mal_strat or 'multiple' in args.mal_obj:
        penul_weights = shared_weights + penul_delta
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, penul_weights)
            print(
                'Penul weights ---- Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
                % (target, target_conf, t, actual, actual_conf))
        elif 'multiple' in args.mal_obj:
            suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                                penul_weights)
            print('%s of %s targets achieved' %
                  (suc_count_local, args.mal_num))

        eval_success, eval_loss = eval_minimal(X_test, Y_test, penul_weights)
        print('Penul weights ---- Malicious Agent: success {}, loss {}'.format(
            eval_success, eval_loss))

    return