def get(self, filename):
     print("filename::")
     print(filename)
     #imagePath = glob.glob("/Users/speedkevin/python/search-engine/dataset/"+filename)
     imagePath = glob.glob(PATH_DATASET_THEONE + filename)
     print("imagePath::")
     print(imagePath)
     imagePath = ''.join(imagePath)
     print("imagePath::")
     print(imagePath)
     k = imagePath[imagePath.rfind("/") + 1:]
     print("k::")
     print(k)
     image = cv2.imread(imagePath)
     print("image::")
     print(image)
     # initialize the index dictionary to store our quantified
     # images, with the 'key' of the dictionary being the image
     # filename and the 'value' our computed features
     index = {}
     # initialize our image descriptor -- a 3D RGB histogram with
     # 8 bins per channel
     desc = RGBHistogram([8, 8, 8])
     # alternative: a single bin per channel
     # desc = RGBHistogram([1, 1, 1])
     features = desc.describe(image)
     index[k] = features
     features = [str(f) for f in features]
     return jsonify(features)
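Every example on this page depends on an RGBHistogram descriptor class that is defined in its own repository and not shown here. As a rough orientation only, a minimal sketch of the basic form of such a descriptor (assuming OpenCV's cv2.calcHist; the variants used in examples #9 and #11 also add colour statistics and a colour-space conversion) could look like:

import cv2


class RGBHistogram:
    def __init__(self, bins):
        # number of bins per channel, e.g. [8, 8, 8]
        self.bins = bins

    def describe(self, image, mask=None):
        # compute a 3D colour histogram over all three channels,
        # optionally restricted to the masked region
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                            [0, 256, 0, 256, 0, 256])
        # normalize the histogram and flatten it into a feature vector
        hist = cv2.normalize(hist, hist)
        return hist.flatten()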
Example #2
def search(queryImage, indexPath, mask=None):
    desc = RGBHistogram([8, 8, 8])
    queryFeatures = desc.describe(queryImage)

    index = cPickle.loads(open(indexPath).read())
    searcher = Searcher(index)
    results = searcher.search(queryFeatures)

    return results[0][0][1]
Example #3
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-m",
                        "--models",
                        required=True,
                        help="Path to the directory that contains models")
    parser.add_argument("-d",
                        "--dataset",
                        required=True,
                        help="Path to the directory that contains images")
    parser.add_argument("-b",
                        "--bin_size",
                        required=False,
                        default=8,
                        help="Histogram columns number")
    parser.add_argument("-img",
                        "--image",
                        required=False,
                        default="Models/ryu_idle.png",
                        help="Image for which we're looking for similar ones")
    args = parser.parse_args()
    bin_size = args.bin_size

    model_image = cv.imread(args.image)
    images = load_images(args.dataset)

    desc = RGBHistogram([bin_size, bin_size, bin_size])
    image_descriptions = []
    model_description = (model_image, desc.describe(model_image))
    for i in images:
        image_descriptions.append((i, desc.describe(i)))

    results = []
    for im_desc in image_descriptions:
        d = chi2_distance(model_description[1], im_desc[1])
        results.append(d)

    # sort by the chi-squared distance only; a sort key avoids comparing
    # the (image, features) tuples whenever two distances are equal
    ranked = sorted(zip(results, image_descriptions), key=lambda pair: pair[0])
    sorted_images = [image for _, (image, _) in ranked]

    # a plain slice already clamps to the list length
    display_best_finds(model_description[0],
                       sorted_images[:MAX_BEST_FITS_DISPLAY])
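Both the Searcher class (used in examples #2, #4, #8 and #12) and the chi2_distance helper called above live in their respective repositories and are not shown on this page. A minimal sketch of each, assuming the standard chi-squared histogram distance and a plain dict index; note that the exact result structure differs between repositories (some return flat (score, name) tuples, others nest them further):

import numpy as np


def chi2_distance(histA, histB, eps=1e-10):
    # chi-squared distance between two flattened, normalized histograms
    return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))


class Searcher:
    def __init__(self, index):
        # index: dict mapping image filename -> flattened histogram features
        self.index = index

    def search(self, queryFeatures):
        # score every indexed image against the query and sort the results
        # from best match (smallest distance) to worst
        results = [(chi2_distance(features, queryFeatures), name)
                   for (name, features) in self.index.items()]
        return sorted(results)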
Example #4
def montage(queryImage, indexPath, datasetPath, mask=None):
    desc = RGBHistogram([8, 8, 8])
    queryFeatures = desc.describe(queryImage)

    index = cPickle.loads(open(indexPath).read())
    searcher = Searcher(index)
    results = searcher.search(queryFeatures)

    rows = 200
    cols = 300
    montage = np.zeros((rows * 3, cols, 3), dtype="uint8")

    for j in range(0, 3):
        (score, imageName) = results[j][0]
        print("PATH {}:    {}     {}").format(j, datasetPath, imageName)
        path = datasetPath + "%s" % (imageName)
        result = cv2.imread(path)
        result = cv2.resize(result, (cols, rows))
        print "\t%d. %s : %.3f" % (j + 1, imageName, score)
        montage[j * rows:(j + 1) * rows, :] = result

    cv2.imshow("Results", montage)
    cv2.waitKey(0)
Example #5
from rgbhistogram import RGBHistogram
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import numpy as np
import glob
import cv2

imagePaths = sorted(glob.glob("images/*.png"))
maskPaths = sorted(glob.glob("masks/*.png"))

data = []
target = []

desc = RGBHistogram([8, 8, 8])

for (imagePath, maskPath) in zip(imagePaths, maskPaths):
    image = cv2.imread(imagePath)
    mask = cv2.imread(maskPath)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    features = desc.describe(image, mask)

    data.append(features)
    target.append(imagePath.split("_")[-2])

targetNames = np.unique(target)
le = LabelEncoder()
target = le.fit_transform(target)
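Example #5 is cut off after label encoding; a hedged continuation that exercises the train_test_split, RandomForestClassifier and classification_report imports it declares might look like this (the 70/30 split and 25 trees are assumptions, not taken from the original):

# split the data into training and testing sets (70/30 split assumed here)
(trainData, testData, trainTarget, testTarget) = train_test_split(
    np.array(data), target, test_size=0.3, random_state=42)

# train a random forest on the RGB histogram features
model = RandomForestClassifier(n_estimators=25, random_state=42)
model.fit(trainData, trainTarget)

# evaluate the classifier on the held-out images
print(classification_report(testTarget, model.predict(testData),
                            target_names=targetNames))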
Example #6
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True, help="path to image dataset")
ap.add_argument("-m", "--masks", required=True, help="path to the image masks")
args = vars(ap.parse_args())


# grab paths from args
imagePaths = sorted(glob.glob(args["images"] + "/*.png"))
maskPaths = sorted(glob.glob(args["masks"] + "/*.png"))

data = []
target = []

# yields a 512-dimensional feature vector used to
# characterize the color of the flower
desc = RGBHistogram([8, 8, 8])

for (imagePath, maskPath) in zip(imagePaths, maskPaths):
    image = cv2.imread(imagePath)
    mask = cv2.imread(maskPath)
    # convert to grayscale
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    features = desc.describe(image, mask)

    data.append(features)
    target.append(imagePath.split("_")[-2])

# encode labels
# np.unique finds the unique species names, which are fed to the LabelEncoder
targetNames = np.unique(target)
Example #7
import cv2

# Construct the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required=True,
    help = 'Path to the directory which contains the images')
ap.add_argument('-i', '--index', required=True,
    help = 'Path to where the computed index will be stored')
args = vars(ap.parse_args())

# index dictionary key - image filename, value - computed features
index = {}

# initialize the 3D RGB histogram image descriptor with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])

# loop over all JPG images in the dataset directory
for path in glob.glob(args['dataset'] + '/*.jpg'):
    
    # extract filename
    k = path[path.rfind('/') + 1:]
    
    # load image, describe it with RGB histogram descriptor
    # and update index
    image = cv2.imread(path)
    features = desc.describe(image)
    index[k] = features
    
# write the index to disk
f = open(args['index'], 'w')
Example #8
ap.add_argument("-i",
                "--index",
                required=True,
                help="Path to where we stored our index")
ap.add_argument("-q", "--query", required=True, help="Path to query image")
args = vars(ap.parse_args())

# load the query image and show it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", queryImage)
print("query: {}".format(args["query"]))

# describe the query in the same way that we did in
# index.py -- a 3D RGB histogram with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])
queryFeatures = desc.describe(queryImage)

# load the index and perform the search
index = pickle.loads(open(args["index"], "rb").read())
searcher = Searcher(index)
results = searcher.search(queryFeatures)

# initialize the two montages to display our results --
# we have a total of 25 images in the index, but let's only
# display the top 10 results; 5 images per montage, with
# images that are 400x166 pixels
montageA = np.zeros((166 * 5, 400, 3), dtype="uint8")
montageB = np.zeros((166 * 5, 400, 3), dtype="uint8")

# loop over the top ten results
Example #9
                    help="path to trained anomaly detection model")
    ap.add_argument(
        "-i",
        "--dataset_root",
        required=False,
        default=
        "/Users/patrickryan/Development/python/mygithub/pyimagesearch-python-machine-learning/3scenes",
        help="path to input image")
    args = vars(ap.parse_args())

    # load the anomaly detection model
    print("[INFO] loading anomaly detection model...")
    model = pickle.loads(open(args["model"], "rb").read())

    histo = RGBHistogram(bins=(3, 3, 3),
                         include_color_stats=True,
                         color_cvt=cv2.COLOR_BGR2HSV)

    imagePaths = list_images(args["dataset_root"])
    image_count = 0
    correct_count = 0
    for imagePath in imagePaths:
        image_count += 1

        features, image = histo.get_features(imagePath)

        pred = model.predict([features])[0]
        if 'forest' in imagePath:
            if pred == 1:
                correct_count += 1
        else:
Example #10
import pickle
import cv2
import os

ap = argparse.ArgumentParser()
ap.add_argument(
    "-d",
    "--dataset",
    required=True,
    help="Path to the directory that contains the images to be indexed")
ap.add_argument("-i",
                "--index",
                required=True,
                help="Path to where the computed index will be stored")
args = vars(ap.parse_args())

index = {}
desc = RGBHistogram([16, 16, 16])
for imagePath in os.listdir(args["dataset"]):
    # load the image, describe it using our RGB histogram
    # descriptor, and update the index
    image = cv2.imread(os.path.join(args["dataset"], imagePath))
    image = cv2.resize(image, (166, 400))
    features = desc.describe(image)
    index[imagePath] = features

f = open(args["index"], "wb")
f.write(pickle.dumps(index))
f.close()
# show how many images we indexed
print("[INFO] done...indexed {} images".format(len(index)))
Example #11
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument("--dataset",
                    required=False,
                    default='./intro-anomaly-detection/forest',
                    help="path to dataset of training images")
    ap.add_argument("--model",
                    required=False,
                    default='./models/forest_anomoly_detector.model',
                    help="path/name to store models")

    args = vars(ap.parse_args())

    print(f"Loading dataset")
    dataset, _ = RGBHistogram.load_dataset(args['dataset'],
                                           include_color_stats=True,
                                           bins=(3, 3, 3),
                                           color_cvt=cv2.COLOR_BGR2HSV)

    # train the anomaly detection model
    print("Fitting anomaly detection model")
    model = IsolationForest(n_estimators=100,
                            contamination=0,
                            behaviour="new",
                            random_state=42)
    # model = LocalOutlierFactor(n_neighbors=10, novelty=True)
    model.fit(dataset)

    print("Save model to disk")
    with open(args['model'], "wb") as f:
        f.write(pickle.dumps(model))
Example #12
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dataset', required = True,
    help = 'Path to the directory that contains the images we just indexed')
ap.add_argument('-i', '--index', required = True,
    help = 'Path to where we stored our index')
ap.add_argument('-q', '--query', required = True,
    help = 'Path to query image')
args = vars(ap.parse_args())

# load the query image and show it
queryImage = cv2.imread(args['query'])
cv2.imshow('Query', queryImage)
print('query: %s' % (args['query']))

# describe the query
desc = RGBHistogram([8, 8, 8])
queryFeatures = desc.describe(queryImage)

# load the index and initialise our searcher
index = cPickle.loads(open(args['index']).read())
searcher = Searcher(index)
results = searcher.search(queryFeatures)

# initialise a montage to display the results --
# displaying the top 5 results (images are 460x636 pixels)
montage = np.zeros((460 * 5, 636, 3), dtype = 'uint8')

for j in xrange(0, 5):
    # grab results and load the result image
    (score, imageName) = results[j]
    path = args['dataset'] + '/%s' % (imageName)