Example #1
def run_experiment(args):
    print('-' * 10 + 'TRAIN TARGET' + '-' * 10 + '\n')
    dataset = load_data('target_data.npz', args)
    v_dataset = load_data('shadow0_data.npz', args)
    train_x, train_y, test_x, test_y = dataset
    true_x = np.vstack((train_x, test_x))
    true_y = np.append(train_y, test_y)
    batch_size = args.target_batch_size

    pred_y, membership, test_classes, classifier, aux = train_target_model(
        args=args,
        dataset=dataset,
        epochs=args.target_epochs,
        batch_size=args.target_batch_size,
        learning_rate=args.target_learning_rate,
        clipping_threshold=args.target_clipping_threshold,
        n_hidden=args.target_n_hidden,
        l2_ratio=args.target_l2_ratio,
        model=args.target_model,
        privacy=args.target_privacy,
        dp=args.target_dp,
        epsilon=args.target_epsilon,
        delta=args.target_delta,
        save=args.save_model)
    train_loss, train_acc, test_loss, test_acc = aux
    per_instance_loss = np.array(log_loss(true_y, pred_y))
   
    # Yeom's membership inference attack when only train_loss is known 
    yeom_mi_outputs_1 = yeom_membership_inference(per_instance_loss, membership, train_loss)
    # Yeom's membership inference attack when both train_loss and test_loss are known - Adversary 2 of Yeom et al.
    yeom_mi_outputs_2 = yeom_membership_inference(per_instance_loss, membership, train_loss, test_loss)

    # Proposed membership inference attacks
    proposed_mi_outputs = proposed_membership_inference(v_dataset, true_x, true_y, classifier, per_instance_loss, args)
    evaluate_proposed_membership_inference(per_instance_loss, membership, proposed_mi_outputs, fpr_threshold=0.01)
    evaluate_proposed_membership_inference(per_instance_loss, membership, proposed_mi_outputs, fpr_threshold=0.01, per_class_thresh=True)

    if not os.path.exists(RESULT_PATH+args.train_dataset):
        os.makedirs(RESULT_PATH+args.train_dataset)
    
    if args.target_privacy == 'no_privacy':
        pickle.dump([aux, membership, per_instance_loss, yeom_mi_outputs_1, yeom_mi_outputs_2, proposed_mi_outputs], open(RESULT_PATH+args.train_dataset+'/'+str(args.target_test_train_ratio)+'_'+args.target_model+'_'+args.target_privacy+'_'+str(args.target_l2_ratio)+'_'+str(args.run)+'.p', 'wb'))	
    else:
        pickle.dump([aux, membership, per_instance_loss, yeom_mi_outputs_1, yeom_mi_outputs_2, proposed_mi_outputs], open(RESULT_PATH+args.train_dataset+'/'+str(args.target_test_train_ratio)+'_'+args.target_model+'_'+args.target_privacy+'_'+args.target_dp+'_'+str(args.target_epsilon)+'_'+str(args.run)+'.p', 'wb'))
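
The two yeom_membership_inference calls above apply a per-record loss test. As a rough illustration only, a minimal sketch of such a loss-threshold rule is shown below; the helper loss_threshold_membership is hypothetical (it is not the repository's function), and its decision rule of flagging records whose loss does not exceed the average training loss is an assumption.

import numpy as np

def loss_threshold_membership(per_instance_loss, train_loss):
    # Flag a record as a training-set member (1) when its loss is no
    # larger than the average training loss, otherwise non-member (0).
    return (np.asarray(per_instance_loss) <= train_loss).astype(int)

# Toy usage: four records against an average training loss of 0.5
print(loss_threshold_membership([0.1, 0.4, 0.9, 2.3], 0.5))  # [1 1 0 0]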
Example #2
def run_experiment(args):
    print('-' * 10 + 'TRAIN TARGET' + '-' * 10 + '\n')
    dataset = load_data('target_data.npz', args)
    train_x, train_y, test_x, test_y = dataset
    true_x = np.vstack((train_x, test_x))
    true_y = np.append(train_y, test_y)
    batch_size = args.target_batch_size

    pred_y, membership, test_classes, classifier, aux = train_target_model(
        args=args,
        dataset=dataset,
        epochs=args.target_epochs,
        batch_size=args.target_batch_size,
        learning_rate=args.target_learning_rate,
        clipping_threshold=args.target_clipping_threshold,
        n_hidden=args.target_n_hidden,
        l2_ratio=args.target_l2_ratio,
        model=args.target_model,
        privacy=args.target_privacy,
        dp=args.target_dp,
        epsilon=args.target_epsilon,
        delta=args.target_delta,
        save=args.save_model)
    train_loss, train_acc, test_loss, test_acc = aux
    per_instance_loss = np.array(log_loss(true_y, pred_y))

    features = get_random_features(true_x, range(true_x.shape[1]), 5)
    print(features)

    # Yeom's membership inference attack when only train_loss is known
    pred_membership = yeom_membership_inference(per_instance_loss, membership,
                                                train_loss)
    fpr, tpr, thresholds = roc_curve(membership, pred_membership, pos_label=1)
    yeom_mem_adv = tpr[1] - fpr[1]

    # Shokri's membership inference attack based on shadow model training
    shokri_mem_adv, shokri_mem_confidence = shokri_membership_inference(
        args, pred_y, membership, test_classes)

    # Yeom's attribute inference attack when train_loss is known - Adversary 4 of Yeom et al.
    pred_membership_all = yeom_attribute_inference(true_x, true_y, classifier,
                                                   membership, features,
                                                   train_loss)
    yeom_attr_adv = []
    for pred_membership in pred_membership_all:
        fpr, tpr, thresholds = roc_curve(membership,
                                         pred_membership,
                                         pos_label=1)
        yeom_attr_adv.append(tpr[1] - fpr[1])

    if not os.path.exists(RESULT_PATH + args.train_dataset):
        os.makedirs(RESULT_PATH + args.train_dataset)

    if args.target_privacy == 'no_privacy':
        pickle.dump(
            [
                train_acc, test_acc, train_loss, membership, shokri_mem_adv,
                shokri_mem_confidence, yeom_mem_adv, per_instance_loss,
                yeom_attr_adv, pred_membership_all, features
            ],
            open(
                RESULT_PATH + args.train_dataset + '/' + args.target_model +
                '_' + 'no_privacy_' + str(args.target_l2_ratio) + '.p', 'wb'))
    else:
        pickle.dump(
            [
                train_acc, test_acc, train_loss, membership, shokri_mem_adv,
                shokri_mem_confidence, yeom_mem_adv, per_instance_loss,
                yeom_attr_adv, pred_membership_all, features
            ],
            open(
                RESULT_PATH + args.train_dataset + '/' + args.target_model +
                '_' + args.target_privacy + '_' + args.target_dp + '_' +
                str(args.target_epsilon) + '_' + str(args.run) + '.p', 'wb'))
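
The yeom_mem_adv computation above reads the membership advantage off sklearn's roc_curve: with hard 0/1 predictions, the middle ROC point is the operating point of the predictor itself, so tpr[1] - fpr[1] equals its TPR minus FPR. A minimal, self-contained sketch with made-up labels (the arrays below are illustrative only, not data from this experiment):

import numpy as np
from sklearn.metrics import roc_curve

membership = np.array([1, 1, 1, 0, 0, 0])       # ground-truth member labels
pred_membership = np.array([1, 1, 0, 1, 0, 0])  # attacker's hard 0/1 guesses

# With binary scores, roc_curve returns three points; index 1 is the
# operating point of the 0/1 predictor.
fpr, tpr, thresholds = roc_curve(membership, pred_membership, pos_label=1)
print(tpr[1] - fpr[1])  # 2/3 - 1/3 = 0.333..., the membership advantage

# Equivalent direct computation of TPR - FPR:
print(pred_membership[membership == 1].mean() - pred_membership[membership == 0].mean())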
Example #3
def run_experiment(args):
    # All of the data is partitioned and preprocessed ahead of time and saved to local disk.
    # Every subsequent training step reads the data back from these local files.
    dataset = load_data(args.DATA_PATH + 'target_data.npz')
    print('Loading File From ' + args.DATA_PATH)
    print('-' * 10 + 'TRAIN TARGET' + '-' * 10 + '\n')
    attack_test_x, attack_test_y, test_classes = train_target_model(
        dataset=dataset,
        epochs=args.target_epochs,
        batch_size=args.target_batch_size,
        learning_rate=args.target_learning_rate,
        n_hidden=args.target_n_hidden,
        l2_ratio=args.target_l2_ratio,
        model=args.target_model,
        save=args.save_model,
        DATA_PATH=args.DATA_PATH,
        MODEL_PATH=args.MODEL_PATH)

    print('-' * 10 + 'TRAIN SHADOW' + '-' * 10 + '\n')
    attack_train_x, attack_train_y, train_classes = train_shadow_models(
        epochs=args.target_epochs,
        batch_size=args.target_batch_size,
        learning_rate=args.target_learning_rate,
        n_shadow=args.n_shadow,
        n_hidden=args.target_n_hidden,
        l2_ratio=args.target_l2_ratio,
        model=args.target_model,