Example 1
elif flags.data_type == 'before_defense':
    classifier_data_path = osp.join(data_path, flags.attack_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.defense_folder,
                 flags.output_folder_name))
elif flags.data_type == 'after_defense':
    classifier_data_path = osp.join(data_path, flags.attack_folder,
                                    flags.defense_folder)
    output_path = create_dir(
        osp.join(classifier_data_path, flags.output_folder_name))
else:
    assert False, 'wrong data_type: %s' % flags.data_type

# load configuration
if flags.data_type == 'target':
    conf = Conf.load(osp.join(classifier_data_path, 'attack_configuration'))
elif flags.data_type == 'adversarial':
    conf = Conf.load(osp.join(classifier_data_path, 'attack_configuration'))
elif flags.data_type == 'source':
    conf = Conf.load(osp.join(classifier_data_path, 'defense_configuration'))
elif flags.data_type == 'before_defense':
    conf = Conf.load(
        osp.join(classifier_data_path, flags.defense_folder,
                 'defense_configuration'))
elif flags.data_type == 'after_defense':
    conf = Conf.load(osp.join(classifier_data_path, 'defense_configuration'))
else:
    assert False, 'wrong data_type: %s' % flags.data_type

# update classifier configuration
conf.classifier_path = classifier_path
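The examples above and below call a create_dir helper before writing any outputs. A minimal sketch of what such a helper is assumed to do (create the directory if it is missing and return its path) is shown here; it is an illustrative assumption, not the project's actual implementation:

import os
import os.path as osp


def create_dir(path):
    # Assumed behavior: create the directory (including parents) if it does not
    # exist, then return the path so the result can be assigned directly.
    if not osp.exists(path):
        os.makedirs(path)
    return path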
Example 2
print('Evaluate attack flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(
    osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]

output_path = create_dir(osp.join(data_path, flags.output_folder_name))

outlier_thresh = 0.05

# load attack configuration
conf = Conf.load(osp.join(output_path, 'attack_configuration'))

# load data
point_clouds, latent_vectors, reconstructions, pc_classes, slice_idx, ae_loss = \
    load_data(data_path, files, ['point_clouds_test_set', 'latent_vectors_test_set', 'reconstructions_test_set',
                                 'pc_classes', 'slice_idx_test_set', 'ae_loss_test_set'])

assert np.all(
    ae_loss > 0
), 'Some autoencoder loss values are not positive, but they all should be!'

nn_idx_dict = {
    'latent_nn': 'latent_nn_idx_test_set',
    'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'
}
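# Load the nearest-neighbor indices matching the configured target selection method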
nn_idx = load_data(data_path, files, [nn_idx_dict[conf.target_pc_idx_type]])
Example 3
def train():
    pt_graph = tf.Graph()
    with pt_graph.as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            
            # Note the global_step=batch parameter to minimize. 
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model
            pred, end_points = MODEL.get_model(pointclouds_pl,
                                                is_training_pl,
                                                bn_decay=bn_decay,
                                                use_input_trans=USE_INPUT_TRANS,
                                                use_feature_trans=USE_FEATURE_TRANS,
                                                )
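    # Optionally build a separate graph holding a pre-trained point-cloud autoencoder to use as a feature extractor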
    if FLAGS.ae_feature:
        feature_graph = tf.Graph()
        with feature_graph.as_default():
            # with tf.device('/gpu:'+str(GPU_INDEX)):
            ae_configuration = osp.join(FLAGS.ae_path, 'configuration')
            ae_conf = Conf.load(ae_configuration)
            ae_conf.experiment_name = 'all_class_ae'
            ae_conf.encoder_args['verbose'] = False
            ae_conf.decoder_args['verbose'] = False
            ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)

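    # Alternatively, build a graph for the DGCNN reconstruction model used as a feature extractor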
    if FLAGS.ss_feature:
        feature_graph = tf.Graph()
        with feature_graph.as_default():
            with tf.device('/gpu:'+str(GPU_INDEX)):
                model_ss = importlib.import_module('dgcnn_reconstruct') # import network module
               
                pointclouds_pl_ss, labels_pl_ss = model_ss.placeholder_inputs(BATCH_SIZE, NUM_POINT)
                is_training_pl_ss = tf.placeholder(tf.bool, shape=())

                # simple model
                pred_ss, end_points_ss = model_ss.get_model(pointclouds_pl_ss, is_training_pl_ss)
                # loss = model_ss.get_loss(pred, labels_pl, end_points)
                # all_nodes = feature_graph.get_operations()
                # for n in all_nodes:
                #     print(n.name)

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    #sess = tf.Session(config=config)

    sess = tf.Session(graph=pt_graph, config=config)

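    # The feature-extractor graph gets its own session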
    if FLAGS.ae_feature or FLAGS.ss_feature:
        feature_sess = tf.Session(graph=feature_graph, config=config)


    # To fix the bug introduced in TF 0.12.1 as in
    # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
    #sess.run(init)

    # Restore weights
    with sess.as_default():
        with pt_graph.as_default():
            # with tf.device('/gpu:'+str(GPU_INDEX)):
            saver = tf.train.Saver()
            saver.restore(sess, MODEL_PATH)
            print("Restored previous weights")
            ops = {'pointclouds_pl': pointclouds_pl,
                   'labels_pl': labels_pl,
                   'is_training_pl': is_training_pl,
                   'pred': pred,
                   'step': batch}
            sys.stdout.flush()
            

    if FLAGS.ae_feature or FLAGS.ss_feature:
        with feature_sess.as_default():
            with feature_graph.as_default():
                # with tf.device('/gpu:'+str(GPU_INDEX)):
                # Load pre-trained AE
                # if FLAGS.ae_feature == 'latent_gan':
                if FLAGS.ae_feature:
                    ae.restore_model(FLAGS.ae_path, 1000, verbose=True)
                    print("loaded AE model")

                elif FLAGS.ss_feature:
                    feature_saver = tf.train.Saver()
                    
                    # print('\n\n\n\n\n\n')
                    # print_tensors_in_checkpoint_file(file_name=FLAGS.ss_path, all_tensors=False, tensor_name='')
                    ops_ss = {'pointclouds_pl': pointclouds_pl_ss,
                              'labels_pl': labels_pl_ss,
                              'is_training_pl': is_training_pl_ss,
                              'pred': pred_ss}
                    feature_saver.restore(feature_sess, FLAGS.ss_path)
                    print("part prediction model + dgcnn loaded")


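    # Extract features for the training split with the restored classifier (and the optional feature extractor)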
    with sess.as_default():
        X_train, X_test, y_train, y_test = data_loader.get_pointcloud(FLAGS.dataset)
        if FLAGS.ae_feature:
            result, labels = get_feature_svm(sess, ops, X_train, y_train,  ae, feature_sess)
        elif FLAGS.ss_feature:
            result, labels = get_feature_svm(sess, ops, X_train, y_train,  None, feature_sess, ops_ss)
        else:
            result, labels = get_feature_svm(sess, ops, X_train, y_train)
        
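    # Either train a small fully connected head on the extracted features, or fit linear SVMs on subsets of the labeled data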
    if FLAGS.add_fc:
        feature_train = result
        fc_graph = tf.Graph()
        with fc_graph.as_default():
            
            feature_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, feature_train.shape[1]))
            labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE, ))

            is_training_pl = tf.placeholder(tf.bool, shape=())

            train_prediction, loss, optimizer = fc_svm(feature_pl, labels_pl, is_training=is_training_pl, layer_dim = (512, 128))

            fc_sess = tf.Session(graph=fc_graph, config=config)
            
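            # Run the features through the FC head in mini-batches and return the mean loss and accuracy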
            def train_eval_svm(feature, labels, is_training):
                file_size = feature.shape[0]
                num_batches = file_size // BATCH_SIZE
                loss_total = 0.0
                acc = 0.0
                for batch_idx in range(num_batches):
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx+1) * BATCH_SIZE
                    f = feature[start_idx:end_idx, :]
                    l = labels[start_idx:end_idx]
                    feed_dict = {is_training_pl: is_training, feature_pl: f, labels_pl: l}
                    if is_training:
                        _, lo, predictions = fc_sess.run([optimizer, loss, train_prediction], feed_dict)
                    else:
                        # do not run the optimizer when evaluating on held-out features
                        lo, predictions = fc_sess.run([loss, train_prediction], feed_dict)
                    loss_total += lo
                    acc += svm_accuracy(predictions, l)
                loss_total = loss_total/num_batches
                acc = acc/num_batches
                return loss_total, acc

            with fc_sess.as_default():
                tf.global_variables_initializer().run()
                for step in range(10001):
                    train_loss, train_acc = train_eval_svm(feature_train, y_train, True)

                    
                    if step % 500 == 0:
                        print('step:{} train loss:{:.6f} train accuracy: {:.2f}'.format(
                                step, train_loss, train_acc))

                        # Extract test-split features with the same feature extractor used for training
                        if FLAGS.ae_feature:
                            feature_test, _ = get_feature_svm(sess, ops, X_test, y_test, ae, feature_sess)
                        elif FLAGS.ss_feature:
                            feature_test, _ = get_feature_svm(sess, ops, X_test, y_test, None, feature_sess, ops_ss)
                        else:
                            feature_test, _ = get_feature_svm(sess, ops, X_test, y_test)
                        test_loss, test_acc = train_eval_svm(feature_test, y_test, False)

                        print('step:{} test loss:{:.6f} test accuracy: {:.2f}'.format(
                                step, test_loss, test_acc))

    else:
        accuracies = []
        percentages = []
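        # Fit a linear SVM on a growing fraction of the labeled training features and record its test accuracy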
        for percentage in PERCENTAGES:
            indices = sample_without_replacement(result.shape[0], int(result.shape[0]*percentage/100))
            # pred = fc_svm(result[indices], labels[indices], is_training_pl)
            # print(f"Percentage of training set:{percentage}, Train accuracy: {svm_accuracy(result[indices], labels[indices])}"
            clf = LinearSVC(penalty='l2', C=C, dual=False, max_iter=10000)
            clf.fit(result[indices], labels[indices])
            print(f"Percentage of training set:{percentage}, Train accuracy: {clf.score(result[indices], labels[indices])}")
            percentages.append(percentage)
            if FLAGS.ae_feature:
                accuracies.append(eval_one_epoch(sess, ops, clf, X_test, y_test,  ae, feature_sess))
            elif FLAGS.ss_feature:
                accuracies.append(eval_one_epoch(sess, ops, clf, X_test, y_test,  None, feature_sess, ops_ss))
            else:
                accuracies.append(eval_one_epoch(sess, ops, clf, X_test, y_test))

        plt.rcParams["font.family"] = "serif"
        fig, ax = plt.subplots(dpi=150, figsize=(8,6))
        ax.semilogx(percentages, accuracies, marker='.')
        ax.set_xlabel('% of Labeled Data Used')
        ax.set_ylabel('Classification Accuracy')
        # ax.set(xlabel='% of Labeled Data Used', ylabel='Classification Accuracy')
        ax.set_xlim([1, 100])
        ax.grid(True, which='both')
        # ax.set_ylim([0,1])
        fig_path = os.path.join(os.path.split(MODEL_PATH)[0], f"svm_accuracy_C_{C}.png")
        fig.savefig(fig_path)
        fig.savefig(fig_path[:-3]+'svg')
        print(f"Figure saved to {fig_path}")
Example 4
print('Test autoencoder flags:', flags)

assert flags.set_type in ['train_set', 'val_set',
                          'test_set'], 'wrong set_type: %s' % flags.set_type

# define basic parameters
project_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))
top_in_dir = osp.join(project_dir, 'data',
                      'shape_net_core_uniform_samples_2048'
                      )  # Top-dir of where point-clouds are stored.
top_out_dir = osp.join(project_dir)  # Use to save Neural-Net check-points etc.

# Load train configuration
train_dir = create_dir(osp.join(top_out_dir, flags.train_folder))
restore_epoch = flags.restore_epoch
conf = Conf.load(train_dir + '/configuration')
conf.encoder_args['return_layer_before_symmetry'] = True

# Load point clouds
object_class = conf.object_class
class_names = conf.class_names
pc_data, slice_idx, pc_label = load_dataset(class_names, flags.set_type,
                                            top_in_dir)
point_clouds = pc_data.point_clouds.copy()

# Sort point cloud axes
if conf.sort_axes:
    point_clouds_axes_sorted = sort_axes(point_clouds)
    point_clouds = point_clouds_axes_sorted

    show = False
Example 5
nn_idx_dict = {
    'latent_nn': 'latent_nn_idx_test_set',
    'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'
}
nn_idx = load_data(data_path, files, [nn_idx_dict[flags.target_pc_idx_type]])

correct_pred = None
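# Optionally compute a mask of point clouds that the classifier predicted correctly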
if flags.correct_pred_only:
    pc_labels, pc_pred_labels = load_data(
        data_path, files, ['pc_label_test_set', 'pc_pred_labels_test_set'])
    correct_pred = (pc_labels == pc_pred_labels)

# load indices for attack
attack_pc_idx = np.load(osp.join(top_out_dir, flags.attack_pc_idx))
attack_pc_idx = attack_pc_idx[:, :flags.num_pc_for_attack]

# load autoencoder configuration
ae_dir = osp.join(top_out_dir, flags.ae_folder)
conf = Conf.load(osp.join(ae_dir, 'configuration'))

# update autoencoder configuration
conf.ae_dir = ae_dir
conf.ae_name = 'autoencoder'
conf.ae_restore_epoch = flags.restore_epoch
conf.encoder_args['return_layer_before_symmetry'] = False
conf.encoder_args['b_norm_decay'] = 1.  # avoid updating the batch normalization moving_mean and moving_variance parameters
conf.decoder_args['b_norm_decay'] = 1.  # avoid updating the batch normalization moving_mean and moving_variance parameters
conf.decoder_args['b_norm_decay_finish'] = 1.  # avoid updating the batch normalization moving_mean and moving_variance parameters

# attack configuration
conf.experiment_name = 'adversary'
Example 6
# Load default train parameters
train_params = default_train_params()
train_params['training_epochs'] = flags.training_epochs

# Load default architecture
encoder, decoder, enc_args, dec_args = mlp_architecture(
    n_pc_points, bneck_size)

conf = Conf(n_input=[n_pc_points, 3],
            loss=ae_loss,
            training_epochs=train_params['training_epochs'],
            batch_size=train_params['batch_size'],
            denoising=train_params['denoising'],
            learning_rate=train_params['learning_rate'],
            train_dir=train_dir,
            loss_display_step=train_params['loss_display_step'],
            saver_step=train_params['saver_step'],
            z_rotate=train_params['z_rotate'],
            encoder=encoder,
            decoder=decoder,
            encoder_args=enc_args,
            decoder_args=dec_args)
conf.experiment_name = experiment_name
conf.held_out_step = 5  # how often to evaluate/print the loss on held_out data (if they are provided)
conf.object_class = object_class
conf.class_names = class_names
conf.sort_axes = flags.sort_axes
conf.encoder_args['return_layer_before_symmetry'] = True
conf.save(osp.join(train_dir, 'configuration'))

if flags.save_config_and_exit:
Example 7
files = [
    f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))
]

attack_dir = osp.join(top_out_dir, flags.ae_folder, 'eval',
                      flags.attack_folder)
output_path = create_dir(osp.join(attack_dir, flags.output_folder_name))
output_path_orig = create_dir(
    osp.join(attack_dir, flags.output_folder_name + '_orig'))

# load data
point_clouds, pc_classes, slice_idx = \
    load_data(data_path, files, ['point_clouds_test_set', 'pc_classes', 'slice_idx_test_set'])

num_points = point_clouds.shape[1]

# load attack configuration
conf = Conf.load(osp.join(attack_dir, 'attack_configuration'))

nn_idx_dict = {
    'latent_nn': 'latent_nn_idx_test_set',
    'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'
}
nn_idx = load_data(data_path, files, [nn_idx_dict[conf.target_pc_idx_type]])

correct_pred = None
if conf.correct_pred_only:
    pc_labels, pc_pred_labels = load_data(
        data_path, files, ['pc_label_test_set', 'pc_pred_labels_test_set'])
    correct_pred = (pc_labels == pc_pred_labels)

# load indices for attack
attack_pc_idx = np.load(osp.join(top_out_dir, flags.attack_pc_idx))
Example 8
bneck_size = 128         # Bottleneck-size of the AE
n_pc_points = 2048       # Number of points per model.

class_name = "chair"


# Load point-clouds.
syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir , syn_id)
all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True)
print('Shape of DATA =', all_pc_data.point_clouds.shape)


# Load pre-trained AE
reset_tf_graph()
ae_conf = Conf.load(ae_configuration)
ae_conf.encoder_args['verbose'] = False
ae_conf.decoder_args['verbose'] = False
ae = PointNetAutoEncoder(ae_conf.experiment_name, ae_conf)
ae.restore_model(ae_conf.train_dir, ae_epoch, verbose=True)



# Use AE to convert raw pointclouds to latent codes.
latent_codes = ae.get_latent_codes(all_pc_data.point_clouds)
latent_data = PointCloudDataSet(latent_codes)
print('Shape of DATA =', latent_data.point_clouds.shape)



Example 9
print('Evaluate defense flags:', flags)

# define basic parameters
top_out_dir = osp.dirname(osp.dirname(osp.abspath(__file__)))  # Use to save Neural-Net check-points etc.
data_path = osp.join(top_out_dir, flags.ae_folder, 'eval')
files = [f for f in os.listdir(data_path) if osp.isfile(osp.join(data_path, f))]

attack_path = create_dir(osp.join(data_path, flags.attack_folder))

if flags.use_adversarial_data:
    output_path = create_dir(osp.join(attack_path, flags.output_folder_name))
else:
    output_path = create_dir(osp.join(attack_path, flags.output_folder_name + '_orig'))

# load attack configuration
conf = Conf.load(osp.join(attack_path, 'attack_configuration'))

# load data
point_clouds, latent_vectors, reconstructions, pc_classes, slice_idx, ae_loss = \
    load_data(data_path, files, ['point_clouds_test_set', 'latent_vectors_test_set', 'reconstructions_test_set',
                                 'pc_classes', 'slice_idx_test_set', 'ae_loss_test_set'])

assert np.all(ae_loss > 0), 'Some autoencoder loss values are not positive, but they all should be!'

nn_idx_dict = {'latent_nn': 'latent_nn_idx_test_set', 'chamfer_nn_complete': 'chamfer_nn_idx_complete_test_set'}
nn_idx = load_data(data_path, files, [nn_idx_dict[conf.target_pc_idx_type]])

correct_pred = None
if conf.correct_pred_only:
    pc_labels, pc_pred_labels = load_data(data_path, files, ['pc_label_test_set', 'pc_pred_labels_test_set'])
    correct_pred = (pc_labels == pc_pred_labels)
Example 10
    idx = train_dir.rfind('_')
    tail = train_dir[idx+1:]
    if tail.isdigit():
        train_dir = train_dir[:idx+1] + str(int(tail)+1)
    else:
        train_dir = train_dir + '_1'
os.makedirs(train_dir)

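# Assemble the autoencoder training configuration; it is saved next to the checkpoints below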
conf = Conf(n_input = [n_pc_points, 3],
            loss = ae_loss,
            training_epochs = train_params['training_epochs'],
            batch_size = train_params['batch_size'],
            denoising = train_params['denoising'],
            learning_rate = train_params['learning_rate'],
            train_dir = train_dir,
            loss_display_step = train_params['loss_display_step'],
            saver_step = train_params['saver_step'],
            z_rotate = train_params['z_rotate'],
            encoder = encoder,
            decoder = decoder,
            encoder_args = enc_args,
            decoder_args = dec_args
           )
conf.experiment_name = experiment_name
conf.held_out_step = 5   # How often to evaluate/print out loss on 
                         # held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))

load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae: