import numpy as np
import pandas as pd
from IPython.display import Image, display
from keras.applications.vgg16 import VGG16, decode_predictions, preprocess_input
from keras.preprocessing import image as image_utils


def Generate_scores(img):
        
    print("[INFO] loading and preprocessing image...")
    image = image_utils.load_img(img, target_size=(224, 224))
    image = image_utils.img_to_array(image)
    
    image = np.expand_dims(image, axis=0)
    image = preprocess_input(image)
    
    print("[INFO] loading network...")
    model = VGG16(weights="imagenet")
    
    print("[INFO] classifying image...")
    preds = model.predict(image)

    # decode_predictions returns (class_id, class_name, probability) tuples
    (imagenet_id, label, prob) = decode_predictions(preds)[0][0]
    
    result=decode_predictions(preds, top=10)[0]
    
    display(Image(img))
    
    result_frame = pd.DataFrame(result).iloc[:, 1:]
    result_frame.columns=["item", "probability"]
    result_frame.index=result_frame.index +1
    #display(result_frame)

    import seaborn as sns
    import matplotlib.pyplot as plt
    get_ipython().run_line_magic('matplotlib', 'inline')
    
    plt.figure(figsize=(10,7))
    sns.set_style('white')
    sns.set_context('talk',font_scale=1.8)
    sns.set_color_codes("pastel")
    ax = sns.barplot(x='probability', y='item', data=result_frame, color="b", palette="Blues_r")
    plt.title('What is it?')
    ax.set(xlim=(0, 1))
    ax.set(xlabel='Probability', ylabel='Object')
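
# A minimal usage sketch for Generate_scores, assuming it runs inside a
# Jupyter notebook; the image path below is hypothetical:
Generate_scores("samples/golden_retriever.jpg")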
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                                        TF_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
            else:
                weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                        TF_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='a268eb855778b3df3c7506639542a6af')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model


if __name__ == '__main__':
    model = ResNet50(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
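
# For reference, decode_predictions returns one list of (class_id, class_name,
# probability) tuples per input image, so the single best guess can be pulled
# out like this (small usage sketch):
top_id, top_label, top_prob = decode_predictions(preds, top=1)[0][0]
print('Top prediction: {} ({:.2f}%)'.format(top_label, top_prob * 100))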
def main():
    # ================================================
    # Load pre-trained model and remove higher level layers
    # ================================================
    print("Loading VGG19 pre-trained model...")
    base_model = VGG19(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('block4_pool').output)

    # ================================================
    # Read images and convert them to feature vectors
    # ================================================
    imgs, filename_heads, X = [], [], []
    path = "db"
    print("Reading images from '{}' directory...\n".format(path))
    for f in os.listdir(path):

        # Process filename
        filename = os.path.splitext(f)  # filename in directory
        filename_full = os.path.join(path,f)  # full path filename
        head, ext = filename[0], filename[1]
        if ext.lower() not in [".jpg", ".jpeg"]:
            continue

        # Read image file
        img = image.load_img(filename_full, target_size=(224, 224))  # load
        imgs.append(np.array(img))  # image
        filename_heads.append(head)  # filename head

        # Pre-process for model input
        img = image.img_to_array(img)  # convert to array
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        features = model.predict(img).flatten()  # features
        X.append(features)  # append feature vector

    X = np.array(X)  # feature vectors
    imgs = np.array(imgs)  # images
    print("imgs.shape = {}".format(imgs.shape))
    print("X_features.shape = {}\n".format(X.shape))

    # ===========================
    # Find k-nearest images to each image
    # ===========================
    n_neighbours = 5 + 1  # +1 as itself is most similar
    knn = kNN()  # kNN model
    knn.compile(n_neighbors=n_neighbours, algorithm="brute", metric="cosine")
    knn.fit(X)

    # ==================================================
    # Plot recommendations for each image in database
    # ==================================================
    output_rec_dir = os.path.join("output", "rec")
    if not os.path.exists(output_rec_dir):
        os.makedirs(output_rec_dir)
    n_imgs = len(imgs)
    ypixels, xpixels = imgs[0].shape[0], imgs[0].shape[1]
    for ind_query in range(n_imgs):

        # Find top-k closest image feature vectors to each vector
        print("[{}/{}] Plotting similar image recommendations for: {}".format(ind_query+1, n_imgs, filename_heads[ind_query]))
        distances, indices = knn.predict(np.array([X[ind_query]]))
        distances = distances.flatten()
        indices = indices.flatten()
        indices, distances = find_topk_unique(indices, distances, n_neighbours)

        # Plot recommendations
        rec_filename = os.path.join(output_rec_dir, "{}_rec.png".format(filename_heads[ind_query]))
        x_query_plot = imgs[ind_query].reshape((-1, ypixels, xpixels, 3))
        x_answer_plot = imgs[indices].reshape((-1, ypixels, xpixels, 3))
        plot_query_answer(x_query=x_query_plot,
                          x_answer=x_answer_plot[1:],  # remove itself
                          filename=rec_filename)

    # ===========================
    # Plot tSNE
    # ===========================
    output_tsne_dir = os.path.join("output")
    if not os.path.exists(output_tsne_dir):
        os.makedirs(output_tsne_dir)
    tsne_filename = os.path.join(output_tsne_dir, "tsne.png")
    print("Plotting tSNE to {}...".format(tsne_filename))
    plot_tsne(imgs, X, tsne_filename)
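
# The kNN wrapper and find_topk_unique helper used above are project-specific
# utilities that are not shown in this listing. Below is a minimal sketch of
# what they might look like, inferred from how they are called (interfaces
# assumed, built on scikit-learn's NearestNeighbors):
import numpy as np
from sklearn.neighbors import NearestNeighbors


class kNN:
    """Thin compile/fit/predict wrapper around sklearn's NearestNeighbors."""

    def compile(self, n_neighbors, algorithm="brute", metric="cosine"):
        self.model = NearestNeighbors(n_neighbors=n_neighbors,
                                      algorithm=algorithm, metric=metric)

    def fit(self, X):
        self.model.fit(X)

    def predict(self, x_query):
        # Returns (distances, indices), each of shape (n_queries, n_neighbors)
        return self.model.kneighbors(x_query, return_distance=True)


def find_topk_unique(indices, distances, k):
    """Keep the first k unique neighbour indices (and their distances), preserving order."""
    seen, keep = set(), []
    for pos, idx in enumerate(indices):
        if idx not in seen:
            seen.add(idx)
            keep.append(pos)
        if len(keep) == k:
            break
    keep = np.array(keep)
    return np.array([indices[keep]]), np.array([distances[keep]])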
Example #4
                              '(`image_dim_ordering="th"`). '
                              'For best performance, set '
                              '`image_dim_ordering="tf"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
                convert_all_kernels_in_model(model)
        else:
            weights_path = get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model


if __name__ == '__main__':
    model = VGG16(weights='imagenet')

    img_path = 'cat.jpg'
    img = image.load_img(img_path, target_size=(512, 512))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)

    print(preds.shape)
Example #5
def main():

    # Load the VGG19 model and truncate it at 'block4_pool' for transfer learning
    print("Loading VGG19 pre-trained model...")

    base_model = VGG19(weights='imagenet')

    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('block4_pool').output)

    # Read the images and embed into a vector space

    imgs, filename_heads, X = [], [], []

    path = os.path.join("data", "final")
    print("Reading the images from '{}' directory...\n".format(path))
    for f in os.listdir(path):

        # Process the filename
        filename = os.path.splitext(f)  # filename in directory
        filename_full = os.path.join(path, f)  # full path filename
        head, ext = filename[0], filename[1]
        if ext.lower() not in [".jpg", ".jpeg"]:
            continue

        # Read the image
        img = image.load_img(filename_full, target_size=(224, 224))  # load
        imgs.append(np.array(img))  # image
        filename_heads.append(head)  # filename head

        # Pre-process the image
        img = image.img_to_array(img)  # convert to array
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        features = model.predict(img).flatten()  # features
        X.append(features)  # append feature vector

    X = np.array(X)  # the vectors
    imgs = np.array(imgs)  # the images
    print("imgs.shape = {}".format(imgs.shape))
    print("X_features.shape = {}\n".format(X.shape))

    # find the closest vectors using kNN:

    n_neighbours = 4 + 1  # +1 as itself is most similar
    knn = kNN()  # kNN model
    knn.compile(n_neighbors=n_neighbours, algorithm="brute", metric="cosine")
    knn.fit(X)

    # Plot the recommendations for each image in database

    output_rec_dir = os.path.join("output", "final_recommendations")

    n_imgs = len(imgs)
    ypixels, xpixels = imgs[0].shape[0], imgs[0].shape[1]
    for ind_query in range(n_imgs):

        # Find k closest image feature vectors to each vector
        print("[{}/{}] finding your recommendations: {}".format(
            ind_query + 1, n_imgs, filename_heads[ind_query]))
        distances, indices = knn.predict(np.array([X[ind_query]]))
        distances = distances.flatten()
        indices = indices.flatten()
        indices, distances = find_topk_unique(indices, distances, n_neighbours)

        # Plot the recommendations
        rec_filename = os.path.join(
            output_rec_dir, "{}_rec.png".format(filename_heads[ind_query]))
        x_query_plot = imgs[ind_query].reshape((-1, ypixels, xpixels, 3))
        x_answer_plot = imgs[indices].reshape((-1, ypixels, xpixels, 3))
        plot_query_answer(
            x_query=x_query_plot,
            x_answer=x_answer_plot[1:],  # remove itself
            filename=rec_filename)

    # Finally, plot the t-SNE results for the dataset:

    output_tsne_dir = os.path.join("output")
    if not os.path.exists(output_tsne_dir):
        os.makedirs(output_tsne_dir)
    tsne_filename = os.path.join(output_tsne_dir, "tsne.png")
    print("Plotting tSNE to {}...".format(tsne_filename))
    plot_tsne(imgs, X, tsne_filename)
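
# plot_query_answer and plot_tsne are also project-specific plotting helpers
# that are not shown in this listing. A minimal sketch of plot_tsne, assuming
# it projects the feature vectors to 2-D with scikit-learn's TSNE and saves a
# scatter plot to the given filename (thumbnail rendering omitted):
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE


def plot_tsne(imgs, X, filename):
    coords = TSNE(n_components=2, random_state=0).fit_transform(X)
    plt.figure(figsize=(8, 8))
    plt.scatter(coords[:, 0], coords[:, 1], s=20)
    plt.title("t-SNE of VGG19 image features")
    plt.savefig(filename)
    plt.close()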
Example #6
def get_recommendations():

    print("Loading the VGG19 model")
    base_model = VGG19(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('block4_pool').output)

    # Read images and convert them to feature vectors

    imgs, filename_heads, X = [], [], []

    path = os.path.join("data", "final")
    print("Reading the images from '{}' directory...\n".format(path))
    for f in os.listdir(path):

        # Process filename
        filename = os.path.splitext(f)  # filename in directory
        filename_full = os.path.join(path, f)  # full path filename
        head, ext = filename[0], filename[1]
        if ext.lower() not in [".jpg", ".jpeg"]:
            continue

        # Read image file
        img = image.load_img(filename_full, target_size=(224, 224))  # load
        imgs.append(np.array(img))  # image
        filename_heads.append(head)  # filename head

        # Pre-process for model input
        img = image.img_to_array(img)  # convert to array
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        features = model.predict(img).flatten()  # features
        X.append(features)  # append feature vector

    X = np.array(X)  # feature vectors
    imgs = np.array(imgs)  # images
    print("imgs.shape = {}".format(imgs.shape))
    print("X_features.shape = {}\n".format(X.shape))

    # Find k-nearest images to each image

    n_neighbours = 6 + 1  # +1 as itself is most similar
    knn = kNN()  # kNN model
    knn.compile(n_neighbors=n_neighbours, algorithm="brute", metric="cosine")
    knn.fit(X)

    # return the recommendations for each painting in the form of a dictionary

    output_rec_dir = os.path.join("output", "recommendations")

    n_imgs = len(imgs)
    ypixels, xpixels = imgs[0].shape[0], imgs[0].shape[1]
    recommendations = {}
    for ind_query in range(n_imgs):

        # Find k closest image feature vectors to each vector
        print("[{}/{}] finding your recommendations for painting {}".format(
            ind_query + 1, n_imgs, filename_heads[ind_query]))
        distances, indices = knn.predict(np.array([X[ind_query]]))
        distances = distances.flatten()
        indices = indices.flatten()
        indices, distances = find_topk_unique(indices, distances, n_neighbours)

        indices = indices[0][1:]  # drop the first result: it is the query painting itself
        wildcard = np.array([
            random.randrange(1, n_imgs) for _ in range(3)
        ])  # add 3 random paintings to the suggestions
        indices = np.concatenate((indices, wildcard))
        indices = [filename_heads[index] for index in indices]
        recommendations.update({filename_heads[ind_query]: indices})

    return recommendations
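
# A small usage sketch for get_recommendations, assuming "data/final" holds
# the .jpg paintings (keys and values are the filename heads found there):
recommendations = get_recommendations()
for painting, similar in recommendations.items():
    print("{} -> {}".format(painting, ", ".join(similar)))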
import argparse

import numpy as np
from keras.applications.vgg16 import VGG16, decode_predictions, preprocess_input
from keras.preprocessing import image as image_utils

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the input image")
args = vars(ap.parse_args())

# cv2 operation to load image - not needed for final product
# orig = cv2.imread(args["image"])

# Load the input image with Keras' helper, resize it to 224x224, and convert the PIL image to a NumPy array

image = image_utils.load_img(args["image"], target_size=(224, 224))
image = image_utils.img_to_array(image)

# Expand the array from 3 to 4 dimensions (add a batch axis) and subtract the ImageNet mean RGB intensities

image = np.expand_dims(image, axis=0)
image = preprocess_input(image)

# load the VGG16 network
model = VGG16(weights="imagenet")

# classify the image
preds = model.predict(image)

# decode_predictions returns a list of (class_id, class_name, probability) tuples
# per image; take the single best guess
(inID, label, prob) = decode_predictions(preds)[0][0]

# we only need the top prediction; decode_predictions can also return more guesses and their probabilities via its `top` argument

# display the predictions to our screen
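# Assumed completion (not in the original snippet): print the top label and its probability
print("Predicted: {} ({:.2f}%)".format(label, prob * 100))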