Example #1
def agent(i, X_shard, Y_shard, t, gpu_id, return_dict, X_test, Y_test, lr=None):
    tf.keras.backend.set_learning_phase(1)

    args = gv.init()
    if lr is None:
        lr = args.eta
    print('Agent %s on GPU %s' % (i,gpu_id))
    # set environment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t, allow_pickle=True)
    shard_size = len(X_shard)

    if 'theta{}'.format(gv.mal_agent_index) in return_dict.keys():
        pre_theta = return_dict['theta{}'.format(gv.mal_agent_index)]
    else:
        pre_theta = None

    # if i == 0:
    #     # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    #     eval_success, eval_loss = eval_minimal(X_test,Y_test,shared_weights)
    #     print('Global success at time {}: {}, loss {}'.format(t,eval_success,eval_loss))

    if args.steps is not None:
        num_steps = args.steps
    else:
        num_steps = int(args.E * shard_size / args.B)

    # with tf.device('/gpu:'+str(gpu_id)):
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        # y = tf.placeholder(dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'census':
        agent_model = census_model_1()
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar10_model()
    else:
        return

    logits = agent_model(x)

    # Both dataset branches computed the same loss (the census-specific
    # sigmoid cross-entropy was commented out), so the if/else is collapsed.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=logits))
    prediction = tf.nn.softmax(logits)

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate=lr).minimize(loss)
    elif args.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(loss)

    if args.k > 1:
        config = tf.ConfigProto(gpu_options=gv.gpu_options)
        # config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
    elif args.k == 1:
        sess = tf.Session()
    else:
        return
    tf.compat.v1.keras.backend.set_session(sess)
    sess.run(tf.global_variables_initializer())

    if pre_theta is not None:
        theta = pre_theta - gv.moving_rate * (pre_theta - shared_weights)
    else:
        theta = shared_weights
    agent_model.set_weights(theta)
    # print('loaded shared weights')

    start_offset = 0
    if args.steps is not None:
        start_offset = (t * args.B * args.steps) % (shard_size - args.B)

    for step in range(num_steps):
        offset = (start_offset + step * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset: (offset + args.B)]
        Y_batch = Y_shard[offset: (offset + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        _, loss_val = sess.run([optimizer, loss], feed_dict={x: X_batch, y: Y_batch_uncat})
        if step % 1000 == 0:
            print('Agent %s, Step %s, Loss %s, offset %s' % (i, step, loss_val, offset))
            # local_weights = agent_model.get_weights()
            # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
            # print('Agent {}, Step {}: success {}, loss {}'.format(i,step,eval_success,eval_loss))

    local_weights = agent_model.get_weights()
    local_delta = local_weights - shared_weights

    # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    eval_success, eval_loss = eval_minimal(X_test, Y_test, local_weights)

    print('Agent {}: success {}, loss {}'.format(i, eval_success, eval_loss))

    return_dict[str(i)] = np.array(local_delta)
    return_dict["theta{}".format(i)] = np.array(local_weights)

    np.save(gv.dir_name + 'ben_delta_%s_t%s.npy' % (i, t), local_delta)

    return
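Since `agent` writes its update into `return_dict`, it is evidently meant to run in its own process with a shared `multiprocessing.Manager` dict. A minimal launch sketch under that assumption (the helper name `run_round` and the shard layout are hypothetical, not from the original repository):

import multiprocessing as mp

def run_round(t, shards, X_test, Y_test, num_gpus=1):
    # One process per benign agent; return_dict collects each local delta.
    manager = mp.Manager()
    return_dict = manager.dict()
    procs = []
    for i, (X_shard, Y_shard) in enumerate(shards):
        p = mp.Process(target=agent,
                       args=(i, X_shard, Y_shard, t, i % num_gpus,
                             return_dict, X_test, Y_test))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    return dict(return_dict)  # {'0': delta_0, 'theta0': weights_0, ...}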
Example #2
def mal_agent(X_shard, Y_shard, mal_data_X, mal_data_Y, t, gpu_id, return_dict,
              mal_visible, X_test, Y_test):

    args = gv.args

    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t,
                             allow_pickle=True)

    holdoff_flag = 0
    if 'holdoff' in args.mal_strat:
        print('Checking holdoff')
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, shared_weights)
            if target_conf > 0.8:
                print('Holding off')
                holdoff_flag = 1

    # tf.reset_default_graph()

    K.set_learning_phase(1)

    print('Malicious Agent on GPU %s' % gpu_id)
    # set environment
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS),
                           dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'CIFAR-10':
        agent_model = cifar_10_model()
    elif args.dataset == 'census':
        agent_model = census_model_1()

    logits = agent_model(x)
    prediction = tf.nn.softmax(logits)
    eval_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))

    config = tf.ConfigProto(gpu_options=gv.gpu_options)
    # config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    if t >= args.mal_delay and holdoff_flag == 0:
        if args.mal_obj == 'all':
            final_delta = mal_all_algs(x, y, logits, agent_model,
                                       shared_weights, sess, mal_data_X,
                                       mal_data_Y, t)
        elif args.mal_obj == 'single' or 'multiple' in args.mal_obj:
            final_delta, penul_delta = mal_single_algs(
                x, y, logits, agent_model, shared_weights, sess, mal_data_X,
                mal_data_Y, t, mal_visible, X_shard, Y_shard)
    elif t < args.mal_delay or holdoff_flag == 1:
        print('Delay/Hold-off')
        final_delta, _ = benign_train(x, y, agent_model, logits, X_shard,
                                      Y_shard, sess, shared_weights)

    final_weights = shared_weights + final_delta
    agent_model.set_weights(final_weights)

    print('---Eval at mal agent---')
    if 'single' in args.mal_obj:
        target, target_conf, actual, actual_conf = mal_eval_single(
            mal_data_X, mal_data_Y, final_weights)
        print(
            'Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
            % (target, target_conf, t, actual, actual_conf))
    elif 'multiple' in args.mal_obj:
        suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                            final_weights)
        print('%s of %s targets achieved' % (suc_count_local, args.mal_num))

    eval_success, eval_loss = eval_minimal(X_test, Y_test, final_weights)
    return_dict['mal_success'] = eval_success
    print('Malicious Agent: success {}, loss {}'.format(
        eval_success, eval_loss))
    write_dict = {}
    # just to maintain ordering
    write_dict['t'] = t + 1
    write_dict['eval_success'] = eval_success
    write_dict['eval_loss'] = eval_loss
    file_write(write_dict, purpose='mal_eval_loss')

    return_dict[str(gv.mal_agent_index)] = np.array(final_delta)
    np.save(gv.dir_name + 'mal_delta_t%s.npy' % t, final_delta)

    # penul_delta is only assigned in the mal_single_algs branch above, so
    # this block assumes the single/multiple objective path was taken.
    if 'auto' in args.mal_strat or 'multiple' in args.mal_obj:
        penul_weights = shared_weights + penul_delta
        if 'single' in args.mal_obj:
            target, target_conf, actual, actual_conf = mal_eval_single(
                mal_data_X, mal_data_Y, penul_weights)
            print(
                'Penul weights ---- Target:%s with conf. %s, Curr_pred on malicious model for iter %s:%s with conf. %s'
                % (target, target_conf, t, actual, actual_conf))
        elif 'multiple' in args.mal_obj:
            suc_count_local = mal_eval_multiple(mal_data_X, mal_data_Y,
                                                penul_weights)
            print('%s of %s targets achieved' %
                  (suc_count_local, args.mal_num))

        eval_success, eval_loss = eval_minimal(X_test, Y_test, penul_weights)
        print('Penul weights ---- Malicious Agent: success {}, loss {}'.format(
            eval_success, eval_loss))

    return
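Both the benign and the malicious agent publish weight deltas keyed by agent index, so the server step presumably adds an aggregate of those deltas to the global weights. A FedAvg-style sketch assuming unweighted averaging over k agents (the actual repository may weight by shard size; `aggregate_round` is a hypothetical helper):

import numpy as np

def aggregate_round(shared_weights, return_dict, k, t):
    # Unweighted mean of the per-agent deltas; the object arrays holding
    # per-layer weights combine layer-wise under + and /.
    deltas = [return_dict[str(i)] for i in range(k)]
    new_weights = shared_weights + sum(deltas) / float(k)
    np.save(gv.dir_name + 'global_weights_t%s.npy' % (t + 1), new_weights)
    return new_weights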
Example #3
def agent(i,
          X_shard,
          Y_shard,
          t,
          gpu_id,
          return_dict,
          X_test,
          Y_test,
          lr=None):
    K.set_learning_phase(1)

    args = gv.args
    if lr is None:
        lr = args.eta
    print('Agent %s on GPU %s' % (i, gpu_id))
    ## Set environment
    #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    #os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"

    ## Load global weights
    shared_weights = np.load(gv.dir_name + 'global_weights_t%s.npy' % t,
                             allow_pickle=True)
    shard_size = len(X_shard)

    # if i == 0:
    #     # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    #     eval_success, eval_loss = eval_minimal(X_test,Y_test,shared_weights)
    #     print('Global success at time {}: {}, loss {}'.format(t,eval_success,eval_loss))

    if args.steps is not None:
        num_steps = args.steps
    else:
        # num_steps = num_epochs * num_batches_per_shard
        # int() must wrap the whole expression: a float here would make
        # range(num_steps) fail in the training loop below.
        num_steps = int(args.E * shard_size / args.B)

    # with tf.device('/gpu:'+str(gpu_id)):
    ## Load data
    if args.dataset == 'census':
        x = tf.placeholder(shape=(None, gv.DATA_DIM), dtype=tf.float32)
        # y = tf.placeholder(dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)
    else:
        x = tf.placeholder(shape=(None, gv.IMAGE_ROWS, gv.IMAGE_COLS,
                                  gv.NUM_CHANNELS),
                           dtype=tf.float32)
        y = tf.placeholder(dtype=tf.int64)

    ## Load model
    if 'MNIST' in args.dataset:
        agent_model = model_mnist(type=args.model_num)
    elif args.dataset == 'census':
        agent_model = census_model_1()

    logits = agent_model(x)

    # As in Example #1, both dataset branches computed the same loss, so the
    # census special case is collapsed into a single statement.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))
    #prediction = tf.nn.softmax(logits)

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
    elif args.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(loss)

    if args.k > 1:
        config = tf.ConfigProto(gpu_options=gv.gpu_options)
        # config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
    elif args.k == 1:
        sess = tf.Session()
    else:
        # Mirror Example #1: bail out rather than hit an undefined `sess`.
        return
    K.set_session(sess)
    sess.run(tf.global_variables_initializer())

    agent_model.set_weights(shared_weights)

    start_offset = 0
    # Start training data from where we left off last time
    if args.steps is not None:
        start_offset = (t * args.B * args.steps) % (shard_size - args.B)

    for step in range(num_steps):
        offset = (start_offset + step * args.B) % (shard_size - args.B)
        X_batch = X_shard[offset:(offset + args.B)]
        Y_batch = Y_shard[offset:(offset + args.B)]
        Y_batch_uncat = np.argmax(Y_batch, axis=1)
        _, loss_val = sess.run([optimizer, loss],
                               feed_dict={
                                   x: X_batch,
                                   y: Y_batch_uncat
                               })
        if step % 100 == 0:
            print('Agent %s, Step %s, Loss %s, offset %s' %
                  (i, step, loss_val, offset))
            # local_weights = agent_model.get_weights()
            # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
            # print('Agent {}, Step {}: success {}, loss {}'.format(i,step,eval_success,eval_loss))

    local_weights = agent_model.get_weights()
    local_delta = local_weights - shared_weights

    # eval_success, eval_loss = eval_minimal(X_test,Y_test,x, y, sess, prediction, loss)
    # Compute TPR and loss for the local model
    eval_success, eval_loss = eval_minimal(X_test, Y_test, local_weights)

    print('Evaluation on test data - Agent {}: success {}, loss {}'.format(
        i, eval_success, eval_loss))

    return_dict[str(i)] = np.array(local_delta)

    np.save(gv.dir_name + 'ben_delta_%s_t%s.npy' % (i, t), local_delta)

    return
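The wrap-around indexing used in all three training loops keeps every batch inside the shard: `offset` is taken modulo `shard_size - B`, so `offset + B` never exceeds `shard_size`. A standalone check of the index arithmetic with made-up values:

shard_size, B, steps, t = 1000, 64, 40, 3   # illustrative values only
start_offset = (t * B * steps) % (shard_size - B)
for step in range(5):
    offset = (start_offset + step * B) % (shard_size - B)
    assert 0 <= offset and offset + B <= shard_size
    print('batch covers [%d, %d)' % (offset, offset + B))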