Example 1

import math

import tensorflow as tf

import set_loader  # dataset-loading helper used by these examples

# InceptionResnetV1Model and batch_size are assumed to be defined earlier in
# the original script; they are not part of this snippet.

output_dir = ''

with tf.Graph().as_default():
    with tf.Session() as sess:
        # embedding_output = tf.placeholder(tf.float32, shape=(None, 128))
        # Load model
        model = InceptionResnetV1Model()
        # Convert to classifier
        # model.convert_to_classifier()

        # face1 and face2 may or may not show the same person; the labels value
        # says which: True/[1,0] = same person, False/[0,1] = different persons
        # Load the data for the targeted attack
        # faces1, faces2, labels, filepaths1, filepaths2 = set_loader.load_construct_testset_1(200)
        faces1, faces2, labels, filepaths1, filepaths2, label_index = set_loader.load_testset(
            16)
        num_examples = len(faces1)
        batch_number = int(math.ceil(num_examples / batch_size))
        # thresholds = np.arange(0, 4, 0.01)
        threshold = 1.1
        # x_adv = []
        accuracy_1 = 0
        accuracy_2 = 0
        accuracy_3 = 0
        accuracy_4 = 0
        accuracy_5 = 0
        # generate the adversarial examples
        for ibatch in range(batch_number):
            bstart = ibatch * batch_size
            bend = min(bstart + batch_size, num_examples)
            print('batch size: {}'.format(bend - bstart))
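The batch loop above is cut off before its body. Judging from threshold = 1.1, the accuracy_* counters, and the [1, 0]/[0, 1] label convention in the comment, each batch is presumably scored by comparing embedding distances against the threshold. Below is a minimal, self-contained sketch of that verification step (NumPy only); the function name and signature are illustrative and not part of the original script.

import numpy as np

def verification_accuracy(emb1, emb2, labels, threshold=1.1):
    # Squared L2 distance between each pair of embeddings
    distances = np.sum(np.square(emb1 - emb2), axis=1)
    # Predict "same person" when the distance falls below the threshold
    predicted_same = distances < threshold
    # Labels are one-hot: [1, 0] = same person, [0, 1] = different persons
    actual_same = np.argmax(labels, axis=1) == 0
    return np.mean(predicted_same == actual_same)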
Example 2
        self.layers.append(self.softmax_output)
        self.layer_names.append('probs')

    def fprop(self, x, set_ref=False):
        # The layers were already built on model.face_input when the classifier
        # was created, so x is ignored and the name -> tensor mapping is
        # returned directly.
        return dict(zip(self.layer_names, self.layers))


with tf.Graph().as_default():
    with tf.Session() as sess:
        # Load model
        model = InceptionResnetV1Model()
        # Convert to classifier
        model.convert_to_classifier()

        # Load pairs of faces and their labels in one-hot encoding
        faces1, faces2, labels = set_loader.load_testset(1000)

        # Create victims' embeddings using Facenet itself
        graph = tf.get_default_graph()
        phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
        feed_dict = {model.face_input: faces2, phase_train_placeholder: False}
        victims_embeddings = sess.run(model.embedding_output,
                                      feed_dict=feed_dict)

        # Define FGSM for the model
        steps = 1
        eps = 0.01
        alpha = eps / steps
        fgsm = FastGradientMethod(model)
        fgsm_params = {'eps': alpha, 'clip_min': 0., 'clip_max': 1.}
        adv_x = fgsm.generate(model.face_input, **fgsm_params)
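The snippet ends after building the symbolic adv_x tensor; nothing has been run yet. A hedged sketch of the likely next step, continuing inside the same session, is to run the attack on faces1 and score the result with the distance-based classifier. The victim_embedding_input placeholder name is an assumption (in the CleverHans Facenet example this code mirrors, it is created by convert_to_classifier), and numpy is assumed to be imported as np.

        # Run FGSM on the source faces (placeholder names assumed, see note above)
        adv_faces = sess.run(adv_x, feed_dict={
            model.face_input: faces1,
            model.victim_embedding_input: victims_embeddings,
            phase_train_placeholder: False,
        })

        # Score the adversarial faces with the distance-based classifier
        adv_probs = sess.run(model.softmax_output, feed_dict={
            model.face_input: adv_faces,
            model.victim_embedding_input: victims_embeddings,
            phase_train_placeholder: False,
        })
        adv_accuracy = np.mean(
            np.argmax(labels, axis=-1) == np.argmax(adv_probs, axis=-1))
        print('Accuracy on adversarial faces: {:.2%}'.format(adv_accuracy))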
Example 3
        self.layer_names.append('logits')

    def fprop(self, x, set_ref=False):
        return dict(zip(self.layer_names, self.layers))


with tf.Graph().as_default():
    with tf.Session() as sess:
        # Load model
        model = InceptionResnetV1Model()
        # Convert to classifier
        model.convert_to_classifier()

        # Load pairs of faces and their labels in one-hot encoding
        size = 100
        faces1, faces2, labels = set_loader.load_testset(size)

        # Create victims' embeddings using Facenet itself
        graph = tf.get_default_graph()
        phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
        feed_dict = {model.face_input: faces2, phase_train_placeholder: False}
        victims_embeddings = sess.run(model.embedding_output,
                                      feed_dict=feed_dict)

        # Define FGSM for the model
        steps = 1
        eps = 0.01
        alpha = eps / steps
        fgsm = FastGradientMethod(model)
        fgsm_params = {'eps': alpha, 'clip_min': 0., 'clip_max': 1.}
        adv_x = fgsm.generate(model.face_input, **fgsm_params)
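As in the previous example, the attack is built but never executed in the snippet. Because eps is split into alpha = eps / steps, the code appears set up to apply the perturbation iteratively when steps > 1. Here is a sketch of such a loop under the same assumptions as above (victim_embedding_input and the phase_train placeholder are not defined in the snippet); note that it simply re-applies the single alpha-sized step to its own output and does not additionally project back into an eps-ball around the original faces.

        # Repeatedly apply the alpha-sized FGSM step (with steps = 1 this is
        # one plain FGSM step); clip_min/clip_max keep pixel values in [0, 1].
        adv_faces = faces1
        for _ in range(steps):
            adv_faces = sess.run(adv_x, feed_dict={
                model.face_input: adv_faces,
                model.victim_embedding_input: victims_embeddings,
                phase_train_placeholder: False,
            })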