required=True,
                help="Path to where we stored our index")
ap.add_argument("-q", "--query", required=True, help="Path to query image")
args = vars(ap.parse_args())

# load the query image and show it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", queryImage)
print("queryImage " + str(queryImage))
print "query: %s" % (args["query"])

# describe the query in the same way that we did in
# index.py -- a 3D RGB histogram with 8 bins per
# channel
desc = RGBHistogram([8, 8, 8])
queryFeatures = desc.describe(queryImage)
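
# the RGBHistogram class itself is not shown in these snippets; a minimal
# sketch of what it likely looks like, assuming it simply wraps cv2.calcHist
# over all three channels and returns the flattened, normalized histogram
# (exact normalization details vary between OpenCV versions):
class RGBHistogram:
    def __init__(self, bins):
        # number of bins per channel, e.g. [8, 8, 8]
        self.bins = bins

    def describe(self, image, mask=None):
        # compute a 3D color histogram over the (optionally masked) image
        # and return it as a flattened feature vector
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
            [0, 256, 0, 256, 0, 256])
        hist = cv2.normalize(hist, hist)
        return hist.flatten()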

# load the index and perform the search
index = cPickle.loads(open(args["index"], "rb").read())
searcher = Searcher(index)
results = searcher.search(queryFeatures)
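
# the Searcher class is also external to this snippet; a minimal sketch,
# assuming it ranks every entry in the index by chi-squared distance to the
# query histogram and returns the results sorted best-first (numpy is
# assumed to be imported above, as it is used for the montages below):
class Searcher:
    def __init__(self, index):
        self.index = index

    def search(self, queryFeatures, eps=1e-10):
        results = {}

        for (k, features) in self.index.items():
            # chi-squared distance between the two histograms; smaller
            # distances mean more similar images
            d = 0.5 * np.sum(((queryFeatures - features) ** 2) /
                (queryFeatures + features + eps))
            results[k] = d

        # return a list of (distance, imageName) tuples, best match first
        return sorted([(v, k) for (k, v) in results.items()])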

# initialize the two montages to display our results --
# we have a total of 25 images in the index, but let's only
# display the top 10 results; 5 images per montage, with
# images that are 400x166 pixels
montageA = np.zeros((166 * 5, 400, 3), dtype="uint8")
montageB = np.zeros((166 * 5, 400, 3), dtype="uint8")

Example #2
	help = "Path to where the computed index will be stored")
args = vars(ap.parse_args())

# initialize the index dictionary to store our quantified
# images, with the 'key' of the dictionary being the image
# filename and the 'value' our computed features
index = {}

# initialize our image descriptor -- a 3D RGB histogram with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])

# use glob to grab the image paths and loop over them
for imagePath in glob.glob(args["dataset"] + "/*.jpg"):
	# extract our unique image ID (i.e. the filename)
	k = imagePath[imagePath.rfind("/") + 1:]
	print("k " + k)

	# load the image, describe it using our RGB histogram
	# descriptor, and update the index
	image = cv2.imread(imagePath)
	features = desc.describe(image)
	index[k] = features

# we are now done indexing our images -- now we can write our
# index to disk
f = open(args["index"], "w")
f.write(cPickle.dumps(index))
f.close()

# show how many images we indexed
print "done...indexed %d images" % (len(index))
Example #3
    # use glob to grab the image paths and loop over them
    for imagePath in glob.glob(args["dataset"] + os.sep + "*.png"):
        # load the image, describe it using our RGB histogram
        # descriptor, and update the index
        image = cv2.imread(imagePath)
        # print(os.path.basename(k))

        # print each image path to show indexing progress
        print(imagePath)
        print("-------------")
        # extract our unique image ID (i.e. the filename)
        k = imagePath[imagePath.rfind(os.sep) + 1:]
        # print(k)

        features = desc.describe(image)
        index[k] = features

elif option == "lbp":
    print("LBP Descriptor")
    # initialize the local binary patterns descriptor along with
    # the data and label lists
    desc = LocalBinaryPatterns(24, 8)
    # data = []
    # labels = []
    # size = len(glob.glob(args["dataset"] + os.sep + "*.png"))

    # Training
    # loop over the training images
    for imagePath in glob.glob(args["dataset"] + os.sep + "*.png"):
        # load the image, convert it to gray scale, and describe it
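
# the LocalBinaryPatterns descriptor initialized above (24 points, radius 8)
# is not defined in these snippets; a minimal sketch, assuming it wraps
# skimage.feature.local_binary_pattern with the "uniform" method and returns
# a normalized histogram of the pattern codes:
from skimage import feature
import numpy as np

class LocalBinaryPatterns:
    def __init__(self, numPoints, radius):
        self.numPoints = numPoints
        self.radius = radius

    def describe(self, image, eps=1e-7):
        # compute the LBP representation of the grayscale image, then build
        # a histogram over the numPoints + 2 possible uniform pattern codes
        lbp = feature.local_binary_pattern(image, self.numPoints,
            self.radius, method="uniform")
        (hist, _) = np.histogram(lbp.ravel(),
            bins=np.arange(0, self.numPoints + 3),
            range=(0, self.numPoints + 2))

        # normalize the histogram so it sums to one
        hist = hist.astype("float")
        hist /= (hist.sum() + eps)
        return hist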
Example #4
# initialize the index dictionary to store our quantified
# images, with the 'key' of the dictionary being the image
# filename and the 'value' our computed features
index = {}

# initialize our image descriptor -- a 3D RGB histogram with
# 8 bins per channel
desc = RGBHistogram([8, 8, 8])

# use glob to grab the image paths and loop over them
for imagePath in glob.glob(args["dataset"] + "/*.png"):
	# extract our unique image ID (i.e. the filename)
	k = imagePath[imagePath.rfind("/") + 1:]

	# load the image, describe it using our RGB histogram
	# descriptor, and update the index
	image = cv2.imread(imagePath)
	features = desc.describe(image)
	index[k] = features

# we are now done indexing our images -- now we can write our
# index to disk
f = open(args["index"], "w")
f.write(cPickle.dumps(index))
f.close()

# show how many images we indexed
print "done...indexed %d images" % (len(index))

Example #5
# loop over the image and mask paths

#for (imagePath, maskPath) in zip(imagePaths, maskPaths):
#    # load the image and mask
#    image = cv2.imread(imagePath)
#    mask = cv2.imread(maskPath)
#    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    
for imagePath in imagePaths:
    # load the image and mask
    image = cv2.imread(imagePath)
#    mask = cv2.imread(maskPath)
    mask = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # describe the image
    features = desc.describe(image, mask)

    # update the list of data and targets
    data.append(features)
    target.append(imagePath.split("_")[-2])
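    # e.g. a (hypothetical) path like "images/image_daisy_001.png" splits on
    # "_" into ["images/image", "daisy", "001.png"], so [-2] is the label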

# grab the unique target names and encode the labels
targetNames = np.unique(target)
le = LabelEncoder()
target = le.fit_transform(target)
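# fit_transform maps the string labels to integers, e.g. (illustrative
# labels) ["daisy", "pansy", "daisy"] -> [0, 1, 0]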

# construct the training and testing splits
(trainData, testData, trainTarget, testTarget) = train_test_split(data, target,
    test_size = 0.3, random_state = 42)

# train the classifier
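# the snippet is cut off at this point; the training step presumably mirrors
# the full example near the bottom of this page -- fit a random forest on the
# training split and print a classification report (RandomForestClassifier
# and classification_report come from scikit-learn, assumed imported above)
model = RandomForestClassifier(n_estimators=25, random_state=84)
model.fit(trainData, trainTarget)
print(classification_report(testTarget, model.predict(testData),
    target_names=targetNames))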
Example #6
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	(kp, dsc) = sift.detectAndCompute(gray, None)
	BOW.add(dsc)

dictionary = BOW.cluster()

desc2 = SIFTHistogram(dictionarySize, dictionary)
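
# SIFTHistogram is not defined in this snippet; a minimal sketch of a
# bag-of-visual-words descriptor, assuming it assigns each SIFT descriptor
# in an image to its nearest word in the k-means dictionary built above and
# returns the normalized histogram of word counts as a plain list (so it can
# be concatenated with the RGB histogram features below):
class SIFTHistogram:
	def __init__(self, dictionarySize, dictionary):
		self.dictionarySize = dictionarySize
		self.dictionary = dictionary
		# or cv2.SIFT_create(), depending on the OpenCV build
		self.sift = cv2.xfeatures2d.SIFT_create()

	def describe(self, image):
		gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
		(kp, dsc) = self.sift.detectAndCompute(gray, None)
		hist = np.zeros(self.dictionarySize)

		if dsc is not None:
			# distance from every descriptor to every dictionary word,
			# then vote for the closest word
			dists = np.linalg.norm(
				dsc[:, np.newaxis, :] - self.dictionary[np.newaxis, :, :],
				axis=2)
			for w in np.argmin(dists, axis=1):
				hist[w] += 1
			hist /= (hist.sum() + 1e-7)

		return list(hist)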

# use glob to grab the image paths and loop over them
for imagePath in glob.glob(args["dataset"] + "/*.jpg"):
	# extract our unique image ID (i.e. the filename)
	k = imagePath[imagePath.rfind("/") + 1:]

	# load the image, describe it using both the RGB histogram and the
	# SIFT bag-of-words descriptors, and update the index
	image = cv2.imread(imagePath)
	features = list(desc.describe(image))
	features2 = desc2.describe(image)
	features = features + features2
	# features = features2
	index[k] = np.array(features)

# we are now done indexing our images -- now we can write our
# index to disk
f = open(args["index"], "w")
f.write(cPickle.dumps(index))
f.close()

# show how many images we indexed
print "done...indexed %d images" % (len(index))
Example #7
ap.add_argument("-i", "--index", required = True,
	help = "Path to where we stored our index")
ap.add_argument("-q", "--query", required = True,
	help = "Path to query image")
args = vars(ap.parse_args())

# load the query image and show it
queryImage = cv2.imread(args["query"])
cv2.imshow("Query", queryImage)
print "query: %s" % (args["query"])

# describe the query in the same way that we did in
# index.py -- a 3D RGB histogram with 8 bins per
# channel
desc = RGBHistogram([8, 8, 8])
queryFeatures = desc.describe(queryImage)

# load the index and perform the search
index = cPickle.loads(open(args["index"], "rb").read())
searcher = Searcher(index)
results = searcher.search(queryFeatures)

# initialize the two montages to display our results --
# we have a total of 25 images in the index, but let's only
# display the top 10 results; 5 images per montage, with
# images that are 400x166 pixels
montageA = np.zeros((166 * 5, 400, 3), dtype = "uint8")
montageB = np.zeros((166 * 5, 400, 3), dtype = "uint8")

# loop over the top ten results
for j in xrange(0, 10):
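	# the loop body is cut off in this snippet; a sketch of what it typically
	# does, modeled on the fuller example further down the page (this assumes
	# a --dataset argument pointing at the indexed images, which is not shown
	# in the argument parsing above)
	(score, imageName) = results[j]
	result = cv2.imread(args["dataset"] + "/" + imageName)
	print("\t%d. %s : %.3f" % (j + 1, imageName, score))

	# place the first five results on montage A and the rest on montage B
	if j < 5:
		montageA[j * 166:(j + 1) * 166, :] = result
	else:
		montageB[(j - 5) * 166:(j - 4) * 166, :] = result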
Example #8
directory = args["query"]
for filename in os.listdir(directory):
    if filename.endswith(".png"):
        queryPath = os.path.join(directory, filename)
        print(queryPath)
        queryImage = cv2.imread(queryPath)
        queryImage = cv2.resize(queryImage, (450, 360))
        cv2.putText(queryImage, queryPath, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (0, 0, 255), 3)
        cv2.imshow("Query", queryImage)
        print("query: %s" % queryPath)
        if search == 0:
            if args["descriptor"] == "rgb":
                desc = RGBHistogram([8, 8, 8])
                queryFeatures = desc.describe(queryImage)
            elif args["descriptor"] == "lbp":
                desc = LocalBinaryPatterns(24, 8)
                gray = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)
                queryFeatures = desc.describe(gray)
            elif args["descriptor"] == "hog":
                winSize = (64, 64)
                blockSize = (16, 16)
                blockStride = (8, 8)
                cellSize = (8, 8)
                nbins = 9
                derivAperture = 1
                winSigma = 4.
                histogramNormType = 0
                L2HysThreshold = 0.2
                gammaCorrection = 0
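                # the snippet is cut off here; a sketch of how these
                # parameters would typically be fed to OpenCV's HOG
                # descriptor (an assumption -- the rest of this branch is
                # not shown in the snippet)
                hog = cv2.HOGDescriptor(winSize, blockSize, blockStride,
                                        cellSize, nbins, derivAperture,
                                        winSigma, histogramNormType,
                                        L2HysThreshold, gammaCorrection)
                gray = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)
                queryFeatures = hog.compute(cv2.resize(gray, winSize)).flatten()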

Example #9
    '--dataset',
    required=True,
    help='Path to the directory that contains the images we just indexed')
ap.add_argument('-i',
                '--index',
                required=True,
                help='Path to where we stored our index')
ap.add_argument('-q', '--query', required=True, help='Path to query image')
args = vars(ap.parse_args())

path = args['query']
image = cv2.imread(path)
cv2.imshow("Query", image)

desc = RGBHistogram([8, 8, 8])
queryFeature = desc.describe(image)
index = pickle.loads(open(args['index'], 'rb').read())
searcher = Searcher(index)
results = searcher.search(queryFeature)

montageA = np.zeros((166 * 5, 400, 3), dtype='uint8')
montageB = np.zeros((166 * 5, 400, 3), dtype='uint8')

for j in range(0, 10):
    (score, imageName) = results[j]
    path = os.path.join(args['dataset'], imageName)
    result = cv2.imread(path)
    print('\t {}. {} : {:.3f}'.format(j + 1, imageName, score))
    if j < 5:
        montageA[j * 166:(j + 1) * 166, :] = result
    else:
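        # assumed continuation (cut off in this snippet), mirroring the
        # montageA assignment above
        montageB[(j - 5) * 166:(j - 4) * 166, :] = result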
Example #10
# initialize the list of data and class label targets
data = []
target = []

# initialize the image descriptor
desc = RGBHistogram([8, 8, 8])

# loop over the image and mask paths
for (imagePath, maskPath) in zip(imagePaths, maskPaths):
	# load the image and mask
	image = cv2.imread(imagePath)
	mask = cv2.imread(maskPath)
	mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

	# describe the image
	features = desc.describe(image, mask)

	# update the list of data and targets
	data.append(features)
	target.append(imagePath.split("_")[-2])

# grab the unique target names and encode the labels
targetNames = np.unique(target)
le = LabelEncoder()
target = le.fit_transform(target)

# construct the training and testing splits
(trainData, testData, trainTarget, testTarget) = train_test_split(data, target,
	test_size = 0.3, random_state = 42)

# train the classifier
Example #11
imagePaths = sorted(glob.glob(args['images'] + "/*.jpg"))
maskPaths = sorted(glob.glob(args["masks"] + "/*.png"))

data = []
target = []

desc = RGBHistogram([8, 8, 8])

for (imagePath, maskPath) in zip(imagePaths, maskPaths):
    image = cv2.imread(imagePath)
    mask = cv2.imread(maskPath)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # import pdb
    # pdb.set_trace()
    features = desc.describe(image, mask)
    data.append(features)
    target.append(imagePath.split("_")[-2])

targetNames = np.unique(target)
le = LabelEncoder()
target = le.fit_transform(target)
(trainData, testData, trainTarget,
 testTarget) = train_test_split(data, target, test_size=0.3, random_state=42)
model = RandomForestClassifier(n_estimators=25, random_state=84)
model.fit(trainData, trainTarget)
print(
    classification_report(testTarget,
                          model.predict(testData),
                          target_names=targetNames))