Example #1
    def run_protection(self, image_paths, th=0.04, sd=1e7, lr=10, max_step=500, batch_size=1, format='png',
                       separate_target=True, debug=False, no_align=False, exp="", maximize=True,
                       save_last_on_failed=True):

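        # Fingerprint of the run settings; used below to decide whether the protector must be rebuilt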
        current_param = "-".join([str(x) for x in [self.th, sd, self.lr, self.max_step, batch_size, format,
                                                   separate_target, debug]])

        image_paths, loaded_images = filter_image_paths(image_paths)

        if not image_paths:
            print("No images in the directory")
            return 3

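        # Detect and crop the faces in the loaded images (alignment can be skipped with no_align)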
        faces = Faces(image_paths, loaded_images, self.aligner, verbose=1, no_align=no_align)
        original_images = faces.cropped_faces

        if len(original_images) == 0:
            print("No face detected. ")
            return 2
        original_images = np.array(original_images)

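        # Rebuild the cloak optimizer only when the run parameters differ from the previous call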
        if current_param != self.protector_param:
            self.protector_param = current_param
            if self.protector is not None:
                del self.protector
            if batch_size == -1:
                batch_size = len(original_images)
            self.protector = FawkesMaskGeneration(self.feature_extractors_ls,
                                                  batch_size=batch_size,
                                                  mimic_img=True,
                                                  intensity_range=PREPROCESS,
                                                  initial_const=sd,
                                                  learning_rate=self.lr,
                                                  max_iterations=self.max_step,
                                                  l_threshold=self.th,
                                                  verbose=debug,
                                                  maximize=maximize,
                                                  keep_final=False,
                                                  image_shape=(IMG_SIZE, IMG_SIZE, 3),
                                                  loss_method='features',
                                                  tanh_process=True,
                                                  save_last_on_failed=save_last_on_failed,
                                                  )
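        # Run the cloak optimization on the cropped faces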
        protected_images = generate_cloak_images(self.protector, original_images)
        faces.cloaked_cropped_faces = protected_images

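        # Merge the cloaked crops back into the original full-size images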
        final_images, images_without_face = faces.merge_faces(
            reverse_process_cloaked(protected_images, preprocess=PREPROCESS),
            reverse_process_cloaked(original_images, preprocess=PREPROCESS))

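        # Save each cloaked image next to its source file, skipping inputs where no face was found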
        for i in range(len(final_images)):
            if i in images_without_face:
                continue
            p_img = final_images[i]
            path = image_paths[i]
            file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), self.mode, format)
            dump_image(p_img, file_name, format=format)

        print("Done!")
        return 1
Example #2
    def run_protection(self, image_paths, mode='mid', th=0.04, sd=1e9, lr=10, max_step=500, batch_size=1, format='png',
                       separate_target=True):
        if mode == 'custom':
            pass
        else:
            th, max_step, lr = self.mode2param(mode)

        start_time = time.time()

        if not image_paths:
            raise Exception("No images in the directory")
        with graph.as_default():
            faces = Faces(image_paths, self.aligner, verbose=1)

            original_images = faces.cropped_faces
            original_images = np.array(original_images)

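            # Pick a target embedding per image, or one shared target for the whole batch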
            if separate_target:
                target_embedding = []
                for org_img in original_images:
                    org_img = org_img.reshape([1] + list(org_img.shape))
                    tar_emb = select_target_label(org_img, self.feature_extractors_ls, self.fs_names)
                    target_embedding.append(tar_emb)
                target_embedding = np.concatenate(target_embedding)
            else:
                target_embedding = select_target_label(original_images, self.feature_extractors_ls, self.fs_names)

            protected_images = generate_cloak_images(self.sess, self.feature_extractors_ls, original_images,
                                                     target_emb=target_embedding, th=th, faces=faces, sd=sd,
                                                     lr=lr, max_step=max_step, batch_size=batch_size)

            faces.cloaked_cropped_faces = protected_images

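            # The cloak is the pixel-space difference between the cloaked and original crops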
            cloak_perturbation = reverse_process_cloaked(protected_images) - reverse_process_cloaked(original_images)
            final_images = faces.merge_faces(cloak_perturbation)

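            # Save each cloaked image next to its source file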
            for p_img, cloaked_img, path in zip(final_images, protected_images, image_paths):
                file_name = "{}_{}_cloaked.{}".format(".".join(path.split(".")[:-1]), mode, format)
                dump_image(p_img, file_name, format=format)

            elapsed_time = time.time() - start_time
            print('attack cost %f s' % elapsed_time)
            print("Done!")
Example #3
    def run_protection(self,
                       image_paths,
                       mode='min',
                       th=0.04,
                       sd=1e9,
                       lr=10,
                       max_step=500,
                       batch_size=1,
                       format='png',
                       separate_target=True,
                       debug=True,
                       no_align=False):
        if mode == 'custom':
            pass
        else:
            th, max_step, lr = self.mode2param(mode)

        current_param = "-".join([
            str(x) for x in [
                mode, th, sd, lr, max_step, batch_size, format,
                separate_target, debug
            ]
        ])
        print("Fininshed Setting Parameters at: {}".format(datetime.now()))
        image_paths, loaded_images = filter_image_paths(image_paths)
        print("Finished Loading Images at: {}".format(datetime.now()))

        if not image_paths:
            print("No images in the directory")
            return 3

        with graph.as_default():
            faces = Faces(image_paths,
                          loaded_images,
                          self.aligner,
                          verbose=1,
                          no_align=no_align)
            original_images = faces.cropped_faces
            print("Finished Detecting Faces In The Images at: {}".format(
                datetime.now()))

            if len(original_images) == 0:
                print("No face detected. ")
                return 2
            original_images = np.array(original_images)

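            # Target selection and cloak generation run inside the shared TensorFlow session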
            with sess.as_default():
                if separate_target:
                    target_embedding = []
                    i = 0
                    print("Start image reshape at: {}".format(datetime.now()))
                    for org_img in original_images:
                        org_img = org_img.reshape([1] + list(org_img.shape))
                        print("Finished Image {} Reshape at: {}".format(
                            i, datetime.now()))
                        tar_emb = select_target_label(
                            org_img, self.feature_extractors_ls, self.fs_names)
                        print(
                            "Finished Target Embedding Image {} at: {}".format(
                                i, datetime.now()))
                        target_embedding.append(tar_emb)
                        i += 1
                    target_embedding = np.concatenate(target_embedding)
                else:
                    target_embedding = select_target_label(
                        original_images, self.feature_extractors_ls,
                        self.fs_names)
                print("Finished All Images Target Embedding at: {}".format(
                    datetime.now()))

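                # Rebuild the Fawkes protector only when the run parameters have changed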
                if current_param != self.protector_param:
                    self.protector_param = current_param

                    if self.protector is not None:
                        del self.protector

                    self.protector = FawkesMaskGeneration(
                        sess,
                        self.feature_extractors_ls,
                        batch_size=batch_size,
                        mimic_img=True,
                        intensity_range='imagenet',
                        initial_const=sd,
                        learning_rate=lr,
                        max_iterations=max_step,
                        l_threshold=th,
                        verbose=1 if debug else 0,
                        maximize=False,
                        keep_final=False,
                        image_shape=(224, 224, 3))

                print("Finished Initializing Fawkes Protector at: {}".format(
                    datetime.now()))

                protected_images = generate_cloak_images(
                    self.protector,
                    original_images,
                    target_emb=target_embedding)

                print("Finished Protecting Images at: {}".format(
                    datetime.now()))

                faces.cloaked_cropped_faces = protected_images

                final_images = faces.merge_faces(
                    reverse_process_cloaked(protected_images),
                    reverse_process_cloaked(original_images))
                print("Finished Merging Faces at: {}".format(datetime.now()))

        for p_img, path in zip(final_images, image_paths):
            file_name = "{}_{}_cloaked.{}".format(
                ".".join(path.split(".")[:-1]), mode, format)
            dump_image(p_img, file_name, format=format)

        print("Done! and Finished Exporting File at: {}".format(
            datetime.now()))
        return 1
Example #4
    def run_protection(self,
                       image_paths,
                       mode='low',
                       th=0.04,
                       sd=1e9,
                       lr=10,
                       max_step=500,
                       batch_size=1,
                       format='png',
                       separate_target=True,
                       debug=False):

        if mode == 'custom':
            pass
        else:
            th, max_step, lr = self.mode2param(mode)

        current_param = "-".join([
            str(x) for x in [
                mode, th, sd, lr, max_step, batch_size, format,
                separate_target, debug
            ]
        ])

        image_paths, loaded_images = filter_image_paths(image_paths)

        if not image_paths:
            raise Exception("No images in the directory")
        with graph.as_default():
            faces = Faces(image_paths, loaded_images, self.aligner, verbose=1)

            original_images = faces.cropped_faces
            original_images = np.array(original_images)

            with sess.as_default():
                if separate_target:
                    target_embedding = []
                    for org_img in original_images:
                        org_img = org_img.reshape([1] + list(org_img.shape))
                        tar_emb = select_target_label(
                            org_img, self.feature_extractors_ls, self.fs_names)
                        target_embedding.append(tar_emb)
                    target_embedding = np.concatenate(target_embedding)
                else:
                    target_embedding = select_target_label(
                        original_images, self.feature_extractors_ls,
                        self.fs_names)

                if current_param != self.protector_param:
                    self.protector_param = current_param

                    if self.protector is not None:
                        del self.protector

                    self.protector = FawkesMaskGeneration(
                        sess,
                        self.feature_extractors_ls,
                        batch_size=batch_size,
                        mimic_img=True,
                        intensity_range='imagenet',
                        initial_const=sd,
                        learning_rate=lr,
                        max_iterations=max_step,
                        l_threshold=th,
                        verbose=1 if debug else 0,
                        maximize=False,
                        keep_final=False,
                        image_shape=(224, 224, 3))

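                # Generate cloaked versions of the cropped faces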
                protected_images = generate_cloak_images(
                    self.protector,
                    original_images,
                    target_emb=target_embedding)

                faces.cloaked_cropped_faces = protected_images

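                # Apply the cloak as a pixel-space perturbation when merging back into the originals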
                cloak_perturbation = reverse_process_cloaked(
                    protected_images) - reverse_process_cloaked(
                        original_images)
                final_images = faces.merge_faces(cloak_perturbation)

        for p_img, path in zip(final_images, image_paths):
            file_name = "{}_{}_cloaked.{}".format(
                ".".join(path.split(".")[:-1]), mode, format)
            dump_image(p_img, file_name, format=format)

        print("Done!")
        return None
Example #5
    def run_protection(self,
                       unprotected,
                       protected,
                       image,
                       mode='min',
                       th=0.04,
                       sd=1e9,
                       lr=10,
                       max_step=500,
                       batch_size=1,
                       format='png',
                       separate_target=True,
                       debug=False,
                       no_align=False,
                       lang="spanish"):
        th, max_step, lr = self.mode2param(mode)

        current_param = "-".join([
            str(x) for x in [
                mode, th, sd, lr, max_step, batch_size, format,
                separate_target, debug
            ]
        ])

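        # Load the single input image from the unprotected directory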
        img = load_image(unprotected + "/" + image)

        with graph.as_default():
            faces = Faces([unprotected + "/" + image], [img],
                          self.aligner,
                          verbose=1,
                          no_align=no_align)
            original_images = faces.cropped_faces

            if len(original_images) == 0:
                response = {
                    'module': 'makeup',
                    'status': 'No face detected',
                    'd1': 'No face detected',
                    'd2': 'Unhandled error',
                    'd3': 'No face detected'
                }
                return response
            original_images = np.array(original_images)

            with sess.as_default():
                target_embedding = select_target_label(
                    original_images, self.feature_extractors_ls, self.fs_names)

                if current_param != self.protector_param:
                    self.protector_param = current_param

                    if self.protector is not None:
                        del self.protector

                    self.protector = FawkesMaskGeneration(
                        sess,
                        self.feature_extractors_ls,
                        batch_size=batch_size,
                        mimic_img=True,
                        intensity_range='imagenet',
                        initial_const=sd,
                        learning_rate=lr,
                        max_iterations=max_step,
                        l_threshold=th,
                        verbose=1 if debug else 0,
                        maximize=False,
                        keep_final=False,
                        image_shape=(224, 224, 3))

                protected_images = generate_cloak_images(
                    self.protector,
                    original_images,
                    target_emb=target_embedding)

                faces.cloaked_cropped_faces = protected_images

                final_images = faces.merge_faces(
                    reverse_process_cloaked(protected_images),
                    reverse_process_cloaked(original_images))

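        # Write the cloaked result into the protected directory under the same file name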
        backend_img = protected + "/" + image
        dump_image(final_images[0], backend_img, format=format)

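        # Build a localized response describing the applied protection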
        if (lang == "english"):
            response = {
                'module':
                'makeup',
                'status':
                'MakeUp Ok',
                'mode':
                mode,
                'd1':
                'Face attacked',
                'd2':
                'Mode: {}'.format(mode),
                'd3':
                'The face of this avatar has been attacked ' +
                'and although it looks identical to the original ' +
                'photo the reality is that you will see that ' +
                'the face has alterations that make it different ' +
                'from the original photo.',
                'backend_img':
                backend_img
            }
        else:
            response = {
                'module':
                'makeup',
                'status':
                'MakeUp Ok',
                'mode':
                mode,
                'd1':
                'Rostro atacado',
                'd2':
                'Modo: {}'.format(mode),
                'd3':
                'El rostro de este avatar ha sido atacado y ' +
                'aunque parezca idéntico a la foto original la ' +
                'realidad es que verá que el rostro tiene altera' +
                'ciones que la hacen diferente a la foto original.',
                'backend_img':
                backend_img
            }

        response["url_img"] = backend_img[backend_img.find("/static"):]
        return response