Example #1
    def like_or_dislike_users(self, users):
        # automatically like or dislike users based on the model previously
        # trained on your historical preferences.

        # facenet settings from export_embeddings....
        model_dir = '20170512-110547'
        data_dir = 'temp_images_aligned'
        embeddings_name = 'temp_embeddings.npy'
        labels_name = 'temp_labels.npy'
        labels_strings_name = 'temp_label_strings.npy'
        is_aligned = True
        image_size = 160
        margin = 44
        gpu_memory_fraction = 1.0
        image_batch = 1000
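        # model_dir points at a pretrained facenet checkpoint; image_size is
        # the input size the model expects, margin is only used when images
        # are not pre-aligned, and image_batch caps how many images are
        # embedded in a single forward pass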
        with tf.Graph().as_default():
            with tf.Session() as sess:
                # Load the facenet model
                facenet.load_model(model_dir)
                for user in users:
                    clean_temp_images()
                    urls = user.get_photos(width='640')
                    image_list = download_url_photos(urls,
                                                     user.id,
                                                     is_temp=True)
                    # align the database
                    tindetheus_align.main(input_dir='temp_images',
                                          output_dir='temp_images_aligned')
                    # export the embeddings from the aligned database

                    train_set = facenet.get_dataset(data_dir)
                    image_list_temp, label_list = facenet.get_image_paths_and_labels(
                        train_set)
                    label_strings = [
                        name
                        for name in os.listdir(os.path.expanduser(data_dir))
                        if os.path.isdir(
                            os.path.join(os.path.expanduser(data_dir), name))
                    ]

                    # Get input and output tensors
                    graph = tf.get_default_graph()
                    images_placeholder = graph.get_tensor_by_name("input:0")
                    embeddings = graph.get_tensor_by_name("embeddings:0")
                    phase_train_placeholder = graph.get_tensor_by_name(
                        "phase_train:0")

                    # Run forward pass to calculate embeddings
                    nrof_images = len(image_list_temp)
                    print('Number of images: ', nrof_images)
                    batch_size = image_batch
                    if nrof_images % batch_size == 0:
                        nrof_batches = nrof_images // batch_size
                    else:
                        nrof_batches = (nrof_images // batch_size) + 1
                    print('Number of batches: ', nrof_batches)
                    embedding_size = embeddings.get_shape()[1]
                    emb_array = np.zeros((nrof_images, embedding_size))
                    start_time = time.time()

                    for i in range(nrof_batches):
                        if i == nrof_batches - 1:
                            n = nrof_images
                        else:
                            n = i * batch_size + batch_size
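                        # n is the exclusive end index of this batch; the
                        # last batch runs to nrof_images so the remainder
                        # of the images is included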
                        # Get images for the batch
                        if is_aligned is True:
                            images = facenet.load_data(
                                image_list_temp[i * batch_size:n], False,
                                False, image_size)
                        else:
                            images = load_and_align_data(
                                image_list_temp[i * batch_size:n], image_size,
                                margin, gpu_memory_fraction)
                        feed_dict = {
                            images_placeholder: images,
                            phase_train_placeholder: False
                        }
                        # Use the facenet model to calculate embeddings
                        embed = sess.run(embeddings, feed_dict=feed_dict)
                        emb_array[i * batch_size:n, :] = embed
                        print('Completed batch', i + 1, 'of', nrof_batches)

                    run_time = time.time() - start_time
                    print('Run time: ', run_time)

                    # export embeddings and labels
                    label_list = np.array(label_list)

                    np.save(embeddings_name, emb_array)

                    if emb_array.size > 0:
                        # calculate the average 128-d embedding for this profile
                        X = calc_avg_emb_temp(emb_array)
                        # evaluate the profile with the trained model
                        yhat = self.model.predict(X)

                        if yhat[0] == 1:
                            didILike = 'Like'
                        else:
                            didILike = 'Dislike'
                    else:
                        # there were no faces in this profile
                        didILike = 'Dislike'
                    print(
                        '********************************************************'
                    )
                    print(user.name, user.age, didILike)
                    print(
                        '********************************************************'
                    )

                    dbase_names = move_images_temp(image_list, user.id)

                    if didILike == 'Like':
                        print(user.like())
                        self.likes_left -= 1
                    else:
                        print(user.dislike())
                    userList = [
                        user.id, user.name, user.age, user.bio,
                        user.distance_km, user.jobs, user.schools,
                        user.get_photos(width='640'), dbase_names, didILike
                    ]
                    self.al_database.append(userList)
                    np.save('al_database.npy', self.al_database)
                    clean_temp_images_aligned()
Example #2
def main(model_dir='20170512-110547',
         data_dir='database_aligned',
         is_aligned=True,
         image_size=160,
         margin=44,
         gpu_memory_fraction=1.0,
         image_batch=1000,
         embeddings_name='embeddings.npy',
         labels_name='labels.npy',
         labels_strings_name='label_strings.npy',
         return_image_list=False):
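    # compute a facenet embedding for every image in data_dir and save the
    # embeddings, integer labels, and label strings as .npy files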
    train_set = facenet.get_dataset(data_dir)
    image_list, label_list = facenet.get_image_paths_and_labels(train_set)
    # fetch the classes (labels as strings) exactly as it's done in get_dataset
    path_exp = os.path.expanduser(data_dir)
    classes = [
        path for path in os.listdir(path_exp)
        if os.path.isdir(os.path.join(path_exp, path))
    ]
    # get the label strings
    label_strings = [
        name for name in classes if os.path.isdir(os.path.join(path_exp, name))
    ]
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(model_dir)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")  # noqa: E501
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")  # noqa: E501
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")  # noqa: E501

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list)
            print('Number of images: ', nrof_images)
            batch_size = image_batch
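            # number of batches is the ceiling of nrof_images / batch_size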
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i * batch_size + batch_size
                # Get images for the batch
                if is_aligned is True:
                    images = facenet.load_data(image_list[i * batch_size:n],
                                               False, False, image_size)
                else:
                    images = load_and_align_data(image_list[i * batch_size:n],
                                                 image_size, margin,
                                                 gpu_memory_fraction)
                feed_dict = {
                    images_placeholder: images,
                    phase_train_placeholder: False
                }
                # Use the facenet model to calculate embeddings
                embed = sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i * batch_size:n, :] = embed
                print('Completed batch', i + 1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            #   export embeddings and labels
            label_list = np.array(label_list)

            np.save(embeddings_name, emb_array)
            if emb_array.size > 0:
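                # shift the integer labels so they start at 0, then save the
                # numeric labels, one label string per image, and the image list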
                labels_final = label_list - np.min(label_list)
                np.save(labels_name, labels_final)
                label_strings = np.array(label_strings)
                np.save(labels_strings_name, label_strings[labels_final])
                np.save('image_list.npy', image_list)
            if return_image_list:
                np.save('validation_image_list.npy', image_list)
                return image_list, emb_array
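
A minimal sketch of how this exporter might be driven from another script; the directories are just the defaults above and are assumed to already hold a pretrained facenet model and an aligned image database:

    # hypothetical driver, assuming the default model_dir and data_dir exist
    image_list, emb_array = main(model_dir='20170512-110547',
                                 data_dir='database_aligned',
                                 return_image_list=True)
    print(emb_array.shape)  # one row per image, one column per embedding dim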
Example #3
    def like_or_dislike_users(self, users):
        # automatically like or dislike users based on the model previously
        # trained on your historical preferences.

        # facenet settings from export_embeddings....
        data_dir = 'temp_images_aligned'
        embeddings_name = 'temp_embeddings.npy'
        # labels_name = 'temp_labels.npy'
        # labels_strings_name = 'temp_label_strings.npy'
        is_aligned = True
        image_size = 160
        margin = 44
        gpu_memory_fraction = 1.0
        image_batch = 1000
        prev_user = None
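        # track the last profile that was liked; seeing the same user id
        # twice in a row is treated below as a sign that Tinder has stopped
        # serving new profiles (i.e. the free likes are used up)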
        for user in users:
            clean_temp_images()
            urls = user.get_photos(width='640')
            image_list = download_url_photos(urls, user.id,
                                             is_temp=True)
            # align the database
            tindetheus_align.main(input_dir='temp_images',
                                  output_dir='temp_images_aligned')
            # export the embeddings from the aligned database

            train_set = facenet.get_dataset(data_dir)
            image_list_temp, label_list = facenet.get_image_paths_and_labels(train_set)  # noqa: E501

            # Get input and output tensors
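            # (unlike Example #1, the facenet model and TensorFlow session
            # are assumed to have been created once elsewhere, e.g. in this
            # class's constructor; the forward pass below reuses self.sess)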
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")  # noqa: E501
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")  # noqa: E501
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")  # noqa: E501

            # Run forward pass to calculate embeddings
            nrof_images = len(image_list_temp)
            print('Number of images: ', nrof_images)
            batch_size = image_batch
            if nrof_images % batch_size == 0:
                nrof_batches = nrof_images // batch_size
            else:
                nrof_batches = (nrof_images // batch_size) + 1
            print('Number of batches: ', nrof_batches)
            embedding_size = embeddings.get_shape()[1]
            emb_array = np.zeros((nrof_images, embedding_size))
            start_time = time.time()

            for i in range(nrof_batches):
                if i == nrof_batches - 1:
                    n = nrof_images
                else:
                    n = i*batch_size + batch_size
                # Get images for the batch
                if is_aligned is True:
                    images = facenet.load_data(
                        image_list_temp[i*batch_size:n],
                        False, False, image_size)
                else:
                    images = load_and_align_data(
                        image_list_temp[i*batch_size:n],
                        image_size, margin, gpu_memory_fraction)
                feed_dict = {images_placeholder: images,
                             phase_train_placeholder: False}
                # Use the facenet model to calculate embeddings
                embed = self.sess.run(embeddings, feed_dict=feed_dict)
                emb_array[i*batch_size:n, :] = embed
                print('Completed batch', i+1, 'of', nrof_batches)

            run_time = time.time() - start_time
            print('Run time: ', run_time)

            # export embeddings and labels
            label_list = np.array(label_list)

            np.save(embeddings_name, emb_array)

            if emb_array.size > 0:
                # calculate the average embedding for this profile
                X = calc_avg_emb_temp(emb_array)
                # evaluate the profile with the trained model
                yhat = self.model.predict(X)

                if yhat[0] == 1:
                    didILike = 'Like'
                    # check to see if this is the same user as before
                    if prev_user == user.id:
                        clean_temp_images_aligned()
                        print('\n\n You have already liked this user!!! \n \n')
                        print('This typically means you have used all of your'
                              ' free likes. Exiting program!!! \n\n')
                        self.likes_left = -1
                        return
                    else:
                        prev_user = user.id
                else:
                    didILike = 'Dislike'
            else:
                # there were no faces in this profile
                didILike = 'Dislike'
            print('**************************************************')
            print(user.name, user.age, didILike)
            print('**************************************************')

            dbase_names = move_images_temp(image_list, user.id)
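            # (move_images_temp presumably relocates the downloaded temp
            # photos into the permanent image database and returns their new
            # file names, which are recorded with the profile below)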

            if didILike == 'Like':
                print(user.like())
                self.likes_left -= 1
            else:
                print(user.dislike())
            userList = [user.id, user.name, user.age, user.bio,
                        user.distance_km, user.jobs, user.schools,
                        user.get_photos(width='640'), dbase_names,
                        didILike]
            self.al_database.append(userList)
            np.save('al_database.npy', self.al_database)
            clean_temp_images_aligned()
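
Both versions of like_or_dislike_users build the classifier input with calc_avg_emb_temp, whose source is not shown here. A minimal sketch of the averaging it presumably performs, assuming every row of emb_array belongs to the current profile:

    import numpy as np

    def calc_avg_emb_temp(emb_array):
        # hypothetical sketch: collapse all face embeddings for this profile
        # into a single averaged feature row for self.model.predict
        return np.mean(emb_array, axis=0).reshape(1, -1)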