def __init__(self, imagePaths, maskPaths):

        """ap = argparse.ArgumentParser()
        ap.add_argument("-i", "--images", required = True,
        help = "path to the image dataset")
        ap.add_argument("-m", "--masks", required = True,
        help = "path to the image masks")
        args = vars(ap.parse_args())"""

        data = []
        target = []

        rgbHistObj = RGBHistogram([8, 8, 8])
        adaptiveObj = Adaptive()
        i = 1
        for imagePath in imagePaths:
            image = cv2.imread(imagePath)
            # mask = cv2.imread(maskPath)
            # mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
            # threshImage = adaptiveObj.getThresh(maskPath)
            print "->" + str(i)
            i = i + 1
            features = rgbHistObj.calculateHist(image, mask=None)  # Maskta size hatasi veriyor none yaptim.
            data.append(features)
            target.append(imagePath.split("_")[1])

        # Machine Learning
        targetNames = np.unique(target)  # drop duplicate class names
        self.le = LabelEncoder()
        target = self.le.fit_transform(target)  # encode the class names as integers for the classifier

        # Build random train and test splits so that the training set we provide does not overfit.
        # The AccuracyTest code was written to check the accuracy of the data.
        (trainData, testData, trainTarget, testTarget) = train_test_split(data, target, test_size=0.3, random_state=42)

        self.model = RandomForestClassifier(n_estimators=25, random_state=84)
        self.model.fit(trainData, trainTarget)  # training happens here

        print "*********************************************EGITIM VERLERI************************************************\n" + classification_report(
            testTarget, self.model.predict(testData), target_names=targetNames
        ) + "************************************************************************************************************"

        """for i in np.random.choice(np.arange(0, len(imagePaths)), 10):
Example #2
import argparse
import glob
import cv2

from RGBHistogram import RGBHistogram  # same local module imported in Example #3

ap = argparse.ArgumentParser()
ap.add_argument(
    "-d",
    "--dataset",
    required=True,
    help="Path to the catalog that contains the images to be indexed")
ap.add_argument("-i", "--index", required=True, help="Path to index")
args = vars(ap.parse_args())

datasetDir = args["dataset"]

index = {}
desc = RGBHistogram([8, 8, 8])

INIT_SIZE = 800

step = 0

for imagePath in glob.glob(datasetDir + "/*.*g"):
    photoName = imagePath[imagePath.rfind("/") + 1:]

    image = cv2.imread(imagePath)

    size = image.shape[:2]  # (height, width)
    coefficient = float(size[0]) / size[1]  # aspect ratio, forced to float division
    # Resize the image to a fixed width while preserving the aspect ratio
    width = INIT_SIZE
    height = int(INIT_SIZE * coefficient)
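    # Example #2 is cut off at this point; a hedged sketch of how such an indexing
    # loop typically continues (resize, describe, store), assuming the index dict
    # is pickled afterwards for a search script such as the one in Example #3:
    image = cv2.resize(image, (width, height))
    index[photoName] = desc.describe(image)  # flattened, normalized colour histogram

    step += 1
    print("indexed %s (%d images so far)" % (photoName, step))

# persist the index so it can be reloaded later (output file name comes from --index)
import pickle
with open(args["index"], "wb") as f:
    pickle.dump(index, f)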
Example #3
from RGBHistogram import RGBHistogram
from ZernikeMoments import ZernikeMoments
import cv2
from matplotlib import pyplot as plt
import glob
import pickle
from Searcher import Searcher

# desc = RGBHistogram([8, 8, 8])
desc = RGBHistogram([4, 4, 4])


test_image_path = '../data/flowers/image_0030.jpg'
query_image = cv2.imread(test_image_path)
query_feature = desc.describe(query_image)
# print(query_feature)
# quit()

# load the index and initialize our searcher
index = pickle.load(open("Histogram_only_index_4bins.cpickle", "rb"))
print(index.items())
searcher = Searcher(index)
results = searcher.search(query_feature)
print(results)
for i in range(0, 5):
	ret_path = '../data/' + results[i][1].replace('\\','/')
	img = cv2.imread(ret_path)
	cv2.imshow(str(i), img)

cv2.waitKey()
exit()
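
# The Searcher class imported in Example #3 is not shown on this page either.
# A minimal sketch, assuming the index is a dict of {imageName: featureVector} and
# that search() returns (distance, imageName) pairs sorted by a chi-squared
# histogram distance (smallest first, so results[i][1] is the i-th best match):
import numpy as np


class Searcher:
    def __init__(self, index):
        self.index = index  # dict mapping image name -> feature vector

    def search(self, queryFeatures):
        results = []
        for (name, features) in self.index.items():
            d = self.chi2_distance(np.array(features), np.array(queryFeatures))
            results.append((d, name))
        return sorted(results)

    def chi2_distance(self, histA, histB, eps=1e-10):
        # chi-squared distance between two normalized histograms
        return 0.5 * np.sum(((histA - histB) ** 2) / (histA + histB + eps))
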
Example #4
from RGBHistogram import RGBHistogram
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split  # sklearn.cross_validation in older scikit-learn releases
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
import numpy as np
import argparse
import glob
import cv2

imagePaths = sorted(glob.glob("dataset/images/*.png"))
maskPaths = sorted(glob.glob("dataset/masks/*.png"))
imagePath = "TestImg/picTac.png"

data = []
target = []

desc = RGBHistogram([8, 8, 8])

counter = 0
for imagePath, maskPath in zip(imagePaths, maskPaths):
    image = cv2.imread(imagePath)
    mask = cv2.imread(maskPath)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    features = desc.describe(image, mask)

    data.append(features)
    target.append(imagePath.split('_')[-2])
    # print(target, '\n')

targetNames = np.unique(target)
le = LabelEncoder()
imagePaths = sorted(glob.glob("dataset/imagesjpg" + "/*.jpg"))  # Egitim path i.
maskPaths = sorted(glob.glob("dataset/masksjpg" + "/*.jpg"))

trainObj = TrainClassify(imagePaths, maskPaths)

# ****************************************************TAC TEST***************************************************************
# adaptiveObj = Adaptive()
testPaths = sorted(glob.glob("test" + "/*.jpg"))

for testPath in testPaths:
    image = cv2.imread(testPath)
    # threshImage = adaptiveObj.getThresh(imagePath)
    # convertedThresh = cv2.cvtColor(threshImage, cv2.COLOR_GRAY2BGR)
    # masked_img = cv2.bitwise_and(image,image,mask = threshImage)
    # cv2.imshow("asdasd", masked_img)
    rgbHistObj = RGBHistogram([8, 8, 8])
    features = rgbHistObj.calculateHist(image, mask=None)

    # predict() expects a 2D array, hence the single-sample list; the integer
    # prediction is then decoded back to the class name
    flower = trainObj.le.inverse_transform(trainObj.model.predict([features]))[0]

    print(testPath)

    if flower == "crocus":
        flower = "cigdem"
    if flower == "daisy":
        flower = "papatya"
    if flower == "pansy":
        flower = "menekse"
    if flower == "sunflower":