Example #1
def save_model():
    train_path = "/home/uawsscu/PycharmProjects/Pass1/object_recognition_detection/pic"

    training_names = os.listdir(train_path)

    image_paths = []
    image_classes = []  # numeric label per image: 0,0,...,1,1,...,2,2,...
    class_id = 0
    for training_name in training_names:
        dir = os.path.join(train_path, training_name)
        # print("dir : ",dir)
        class_path = imutils.imlist(dir)
        # print class_path," classPath Type : ",type(class_path)
        image_paths += class_path
        image_classes += [class_id] * len(class_path)
        #  print " image class : ",image_classes
        class_id += 1

    # Create feature extraction and keypoint detector objects
    # print image_classes," imP :",image_paths

    fea_det = cv2.FeatureDetector_create("SIFT")
    des_ext = cv2.DescriptorExtractor_create("SIFT")

    # List where all the descriptors are stored
    des_list = []

    for image_path in image_paths:

        im = cv2.imread(image_path)
        kpts = fea_det.detect(im)
        # print "kpt : ",kpts
        kpts, des = des_ext.compute(im, kpts)
        des_list.append((image_path, des))
        #print image_path
        #print "des : ",des

    # Stack all the descriptors vertically in a numpy array
    descriptors = des_list[0][1]
    # print "dess : ",descriptors

    for image_path, descriptor in des_list[1:]:
        try:
            # print "des2 : ",descriptor
            descriptors = np.vstack((descriptors, descriptor))

        except Exception:
            # skip images whose descriptor is None (no keypoints were found)
            pass

    # Perform k-means clustering
    k = 7
    voc, variance = kmeans(descriptors, k, 1)

    #print voc

    # Calculate the histogram of features
    im_features = np.zeros((len(image_paths), k),
                           "float32")  # one k-bin histogram row per image
    #print im_features #[00000][0000]

    for i in xrange(len(image_paths)):
        try:
            words, distance = vq(des_list[i][1], voc)
            for w in words:
                im_features[i][w] += 1
        except Exception:
            # skip images with no descriptors (des is None)
            pass

    # Perform Tf-Idf vectorization

    nbr_occurences = np.sum((im_features > 0) * 1, axis=0)
    idf = np.array(
        np.log((1.0 * len(image_paths) + 1) / (1.0 * nbr_occurences + 1)),
        'float32')
    im_features = np.multiply(im_features, idf)

    # Scaling the words
    stdSlr = StandardScaler().fit(im_features)
    #print stdSlr
    im_features = stdSlr.transform(im_features)
    #print im_features
    clf = LinearSVC()
    clf.fit(im_features, np.array(image_classes))
    # Save the SVM

    joblib.dump((clf, training_names, stdSlr, k, voc), "train.pkl", compress=3)
    print clf, " ", training_names, " ", stdSlr, " ", k, " ", voc
    print "SAVE Model"
Example #2
def save_model():
    train_path = "/home/mprang/PycharmProjects/object_detection/object_recognition_detection/pic"
    training_names = os.listdir(train_path)
    image_paths = []
    image_classes = []  # numeric label per image: 0,0,...,1,1,...,2,2,...
    class_id = 0
    for training_name in training_names:
        dir = os.path.join(train_path, training_name)
        class_path = imutils.imlist(dir)

        image_paths += class_path
        image_classes += [class_id] * len(class_path)
        class_id += 1

    # Create feature extraction and keypoint detector objects
    # print image_classes," imP :",image_paths

    fea_det = cv2.FeatureDetector_create("SIFT")
    des_ext = cv2.DescriptorExtractor_create("SIFT")

    # List where all the descriptors are stored
    des_list = []

    for image_path in image_paths:
        # print image_path
        im = cv2.imread(image_path)
        kpts = fea_det.detect(im)
        kpts, des = des_ext.compute(im, kpts)
        des_list.append((image_path, des))

    # Stack all the descriptors vertically in a numpy array
    descriptors = des_list[0][1]
    for image_path, descriptor in des_list[1:]:
        descriptors = np.vstack((descriptors, descriptor))

    # Perform k-means clustering
    k = 100
    voc, variance = kmeans(descriptors, k, 1)

    # Calculate the histogram of features
    im_features = np.zeros((len(image_paths), k), "float32")  # one k-bin histogram row per image

    for i in xrange(len(image_paths)):
        words, distance = vq(des_list[i][1], voc)
        for w in words:
            im_features[i][w] += 1

    # Scaling the words
    stdSlr = StandardScaler().fit(im_features)
    im_features = stdSlr.transform(im_features)

    # Train the Linear SVM
    clf = LinearSVC()
    clf.fit(im_features, np.array(image_classes))
    # Save the SVM

    joblib.dump((clf, training_names, stdSlr, k, voc), "train.pkl", compress=3)
    print "SAVE MODEL"
Example #3
def TestSampleFeaturesGenerator(image_path):
	stdSlr, k, voc = joblib.load("bof.pkl")

	image_paths = imutils.imlist(image_path)
	# List where all the descriptors are stored
	des_list = []
	HH = []
	for image_path in image_paths:
	    im = cv2.imread(image_path)
	    if im is None:
	        print "No such file {}\nCheck if the file exists".format(image_path)
	        exit()
	    kpts, des = sift.detectAndCompute(im, None)
	    hsv = cv2.cvtColor(im,cv2.COLOR_BGR2HSV)
	    kernel = np.ones((50,50),np.float32)/2500
	    hsv = cv2.filter2D(hsv,-1,kernel)
	    h_hue = cv2.calcHist( [hsv], [0], None, [180], [0, 180] )
	    H = []
	    n_hue = sum(h_hue)
	    for h in h_hue:
	        hh = np.float32(float(h)/float(n_hue))
	        H.append(hh)
	    
	    h_sat = cv2.calcHist( [hsv], [1], None, [256], [0, 256] )
	    n_sat = sum(h_sat)
	    for h in h_sat:
	        hh = np.float32(float(h)/float(n_sat))
	        H.append(hh) 
	    HH.append(H)
	    des_list.append((image_path, des))   

	# Stack all the descriptors vertically in a numpy array
	# print des_list
	descriptors = des_list[0][1]
	for image_path, descriptor in des_list[1:]:
	    descriptors = np.vstack((descriptors, descriptor))
	# Build the bag-of-words histogram for each test image
	test_features = np.zeros((len(image_paths), k), "float32")
	for i in xrange(len(image_paths)):
	    words, distance = vq(des_list[i][1],voc)
	    for w in words:
	        test_features[i][w] += 1

	# Scale the features
	test_features = stdSlr.transform(test_features)
	test_features = np.append(test_features, HH, axis = 1)
	fl = open('TestFeature.csv', 'w')

	writer = csv.writer(fl)
	for values in test_features:
	    writer.writerow(values)

	fl.close() 
	return test_features
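The per-bin loops above just normalize each histogram so it sums to 1. A sketch of the same computation vectorized with NumPy (a drop-in fragment: it assumes hsv and HH from the surrounding function and produces the same H):

import cv2
import numpy as np

# Equivalent, vectorized normalization of the hue and saturation histograms.
h_hue = cv2.calcHist([hsv], [0], None, [180], [0, 180]).ravel()
h_sat = cv2.calcHist([hsv], [1], None, [256], [0, 256]).ravel()
H = np.concatenate([h_hue / h_hue.sum(), h_sat / h_sat.sum()]).astype(np.float32)
HH.append(H)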
Example #4
def save_desc_file(train_path, method, condition):
    # Start the stopwatch / counter
    t1_start = clock()
    # Get name path of subfolder train
    training_names = os.listdir(train_path)

    # Get all the paths to the images and save them in a list
    # image_paths, with the corresponding label in image_classes
    image_paths = []
    image_classes = []
    class_id = 0
    for training_name in training_names:
        dir = os.path.join(train_path, training_name)
        class_path = imutils.imlist(dir)
        image_paths += class_path
        image_classes += [class_id] * len(class_path)
        class_id += 1

    des_list = []
    if condition == 1:
        for i, filename in enumerate(image_paths):
            img = cv2.imread(filename)
            grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            kp, desc = detector.detectAndCompute(grey_img, None)
            des_list.append(desc)  #SAVE KP AND DESC HERE

        # Open des_list and dump des_list to des_list.txt
        with open('des_list_{}.txt'.format(method), 'wb') as fp:
            pickle.dump(des_list, fp)
        # Open des_list.txt and save data in des_list.txt to des_list
        with open('des_list_{}.txt'.format(method), 'rb') as fp:
            des_list = pickle.load(fp)
    elif condition == 0:
        # Open des_list.txt and save data in des_list.txt to des_list
        with open('des_list_{}.txt'.format(method), 'rb') as fp:
            des_list = pickle.load(fp)

    # Stop the stopwatch / counter
    t1_stop = clock()
    # Print the elapsed time t1_stop - t1_start
    print("Save desc file time in seconds:", t1_stop - t1_start)
    return des_list, image_paths
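A portability note: clock here is presumably time.clock, which was deprecated in Python 3.3 and removed in Python 3.8. A sketch of the same stopwatch on modern Python using time.perf_counter:

from time import perf_counter

t1_start = perf_counter()
# ... descriptor extraction work ...
t1_stop = perf_counter()
print("Save desc file time in seconds:", t1_stop - t1_start)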
Example #5
    def ExtractFeatures(self):
        train_path = 'dataset/trained/clusters'
        training_names = os.listdir(train_path)
        
        for training_name in training_names:
            image_paths = []
            image_classes = []
            class_id = 0
            full_train_path = os.path.join(train_path, training_name)
            cluster_dir_names = os.listdir(full_train_path)
        
            for cluster_dir_name in cluster_dir_names:
                dirs = os.path.join(full_train_path, cluster_dir_name)
                class_path = imutils.imlist(dirs)
                image_paths+=class_path
                image_classes+=[class_id]*len(class_path)
                class_id+=1
        
            # Create feature extraction and keypoint detector objects
            fea_det = cv2.FeatureDetector_create("ORB")
            des_ext = cv2.DescriptorExtractor_create("ORB")
            
            # List where all the descriptors are stored
            des_list = []
            
            for image_path in image_paths:
#                 print image_path
                im = cv2.imread(image_path)
                kpts = fea_det.detect(im)
                kpts, des = des_ext.compute(im, kpts)
                print image_path
                print des.shape
                des_list.append((image_path, des))  
                
                
            # Stack all the descriptors vertically in a numpy array
            descriptors = des_list[0][1]
            for image_path, descriptor in des_list[1:]:
#                 print image_path
                descriptors = np.vstack((descriptors, descriptor)) 
                
            print descriptors
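As with SIFT above, the ORB factory calls are OpenCV 2.4 API. A minimal sketch of the same step on OpenCV 3+ (Python 3), where ORB ships in the main module:

import cv2

# Sketch of a modern port of the ORB detect/compute loop above.
orb = cv2.ORB_create()
des_list = []
for image_path in image_paths:
    im = cv2.imread(image_path)
    kpts, des = orb.detectAndCompute(im, None)
    if des is not None:
        des_list.append((image_path, des))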
Example #6
group.add_argument("-i", "--image", help="Path to image")
parser.add_argument("-v", "--visualize", action="store_true")
args = vars(parser.parse_args())

# Get the path of the testing image(s) and store them in a list
image_paths = []
if args["testingSet"]:
    test_path = args["testingSet"]
    try:
        testing_names = os.listdir(test_path)
    except OSError:
        print "No such directory {}\nCheck if the file exists".format(test_path)
        exit()
    for testing_name in testing_names:
        dir = os.path.join(test_path, testing_name)
        class_path = imutils.imlist(dir)
        image_paths += class_path
else:
    image_paths = [args["image"]]

# Create feature extraction and keypoint detector objects
fea_det = cv2.FeatureDetector_create("SIFT")
des_ext = cv2.DescriptorExtractor_create("SIFT")

# List where all the descriptors are stored
des_list = []

for image_path in image_paths:
    im = cv2.imread(image_path)
    if im is None:
        print "No such file {}\nCheck if the file exists".format(image_path)
Example #7
parser = ap.ArgumentParser()
parser.add_argument("-t", "--trainingSet", help="Path to Training Set", required="True")
args = vars(parser.parse_args())

# Get the training classes names and store them in a list
train_path = args["trainingSet"]
training_names = os.listdir(train_path)

# Get all the paths to the images and save them in a list
# image_paths, with the corresponding label in image_classes
image_paths = []
image_classes = []
class_id = 0
for training_name in training_names:
    dir = os.path.join(train_path, training_name)
    class_path = imutils.imlist(dir)
    image_paths+=class_path
    image_classes+=[class_id]*len(class_path)
    class_id+=1

# Create feature extraction and keypoint detector objects
#fea_det = cv2.FeatureDetector_create("SIFT")
sift = cv2.xfeatures2d.SIFT_create()
#des_ext = cv2.DescriptorExtractor_create("SIFT")
#(kps, descs) = sift.detectAndCompute(gray, None)
# List where all the descriptors are stored
des_list = []

for image_path in image_paths:
    im = cv2.imread(image_path)
    #kpts = fea_det.detect(im)
Example #8
def TestSampleFeaturesGeneratorWithLabel(train_path):
    stdSlr, k, voc = joblib.load("bof.pkl")
    training_names = mylistdir(train_path)
    image_paths = []
    image_classes = []
    for training_name in training_names:
        dir = os.path.join(train_path, training_name)
        class_path = imutils.imlist(dir)
        image_paths += class_path
        image_classes += [training_name] * len(class_path)
    des_list = []
    HH = []
    image_names = np.reshape(image_paths, (-1, 1))
    for image_path in image_paths:
        im = cv2.imread(image_path)
        if im is None:
            print "No such file {}\nCheck if the file exists".format(image_path)
            exit()
        kpts, des = sift.detectAndCompute(im, None)
        hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        kernel = np.ones((50, 50), np.float32) / 2500
        hsv = cv2.filter2D(hsv, -1, kernel)
        h_hue = cv2.calcHist([hsv], [0], None, [180], [0, 180])
        H = []
        n_hue = sum(h_hue)
        for h in h_hue:
            hh = np.float32(float(h) / float(n_hue))
            H.append(hh)
        h_sat = cv2.calcHist([hsv], [1], None, [256], [0, 256])
        temp = []
        temp.append(np.std(H, ddof=1))
        # H = []
        n_sat = sum(h_sat)
        for h in h_sat:
            hh = np.float32(float(h) / float(n_sat))
            H.append(hh)
        temp.append(np.std(H, ddof=1))
        HH.append(H)
        des_list.append((image_path, des))
    # Stack all the descriptors vertically in a numpy array
    # print des_list
    descriptors = des_list[0][1]
    for image_path, descriptor in des_list[1:]:
        descriptors = np.vstack((descriptors, descriptor))

    # Build the bag-of-words histogram for each test image
    test_features = np.zeros((len(image_paths), k), "float32")
    for i in xrange(len(image_paths)):
        words, distance = vq(des_list[i][1], voc)
        for w in words:
            test_features[i][w] += 1

    # Scale the features
    test_features = stdSlr.transform(test_features)
    image_classes = np.reshape(image_classes, (-1, 1))
    test_features = np.append(test_features, HH, axis=1)
    res = np.append(test_features, image_classes, axis=1)
    res = np.append(image_names, res, axis=1)
    fl = open('TestFeatureWithLabel.csv', 'w')

    writer = csv.writer(fl)
    for values in res:
        writer.writerow(values)

    fl.close()
    return res
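np.append with axis=1 requires 2-D operands with matching row counts, which is why image_paths and image_classes are reshaped with (-1, 1) above. numpy.column_stack performs the same join and accepts 1-D sequences directly; a sketch of the equivalent table assembly (the result is upcast to strings, which is fine for the CSV output):

import numpy as np

# Equivalent to the reshape + three np.append calls above (a sketch).
res = np.column_stack((image_paths, test_features, HH, image_classes))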
Example #9
                    "--trainingSet",
                    help="Path to Training Set",
                    required="True")
args = vars(parser.parse_args())

# Get the training classes names and store them in a list
train_path = args["trainingSet"]
training_name = os.listdir(train_path)

# Get all the paths to the images and save them in a list (image_paths)
image_paths = []

#    Note: iterating per-class subfolders fails on entries like 'dataset/train/.DS_Store'
#    (not a directory), so all images are read from train_path directly with class_id == 0.

class_path = imutils.imlist(train_path)

#        all image_paths in a list
image_paths += class_path
#print image_paths

# Create feature extraction and keypoint detector objects
fea_det = cv2.FeatureDetector_create("SIFT")
des_ext = cv2.DescriptorExtractor_create("SIFT")

# List where all the descriptors are stored
des_list = []

for image_path in image_paths:
    im = cv2.imread(image_path)
    kpts = fea_det.detect(im)
Example #10
def TrainingSampleFeaturesGenerator(train_path):
    training_names = mylistdir(train_path)
    image_paths = []
    image_classes = []
    for training_name in training_names:
        dir = os.path.join(train_path, training_name)
        class_path = imutils.imlist(dir)
        image_paths += class_path
        image_classes += [training_name] * len(class_path)
    # List where all the descriptors are stored
    des_list = []
    HH = []
    for image_path in image_paths:
        im = cv2.imread(image_path)
        if im is None:
            print "No such file {}\nCheck if the file exists".format(image_path)
            exit()
        kpts, des = sift.detectAndCompute(im, None)
        hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        kernel = np.ones((50, 50), np.float32) / 2500
        hsv = cv2.filter2D(hsv, -1, kernel)
        h_hue = cv2.calcHist([hsv], [0], None, [180], [0, 180])
        H = []
        n_hue = sum(h_hue)
        for h in h_hue:
            hh = np.float32(float(h) / float(n_hue))
            H.append(hh)
        h_sat = cv2.calcHist([hsv], [1], None, [256], [0, 256])
        temp = []
        temp.append(np.std(H, ddof=1))
        # H = []
        n_sat = sum(h_sat)
        for h in h_sat:
            hh = np.float32(float(h) / float(n_sat))
            H.append(hh)
        temp.append(np.std(H, ddof=1))
        HH.append(H)
        des_list.append((image_path, des))

    # Stack all the descriptors vertically in a numpy array
    descriptors = des_list[0][1]
    for image_path, descriptor in des_list[1:]:
        descriptors = np.vstack((descriptors, descriptor))

    # Perform k-means clustering
    k = 80
    voc, variance = kmeans(descriptors, k, 1)

    # Calculate the histogram of features
    im_features = np.zeros((len(image_paths), k), "float32")
    for i in xrange(len(image_paths)):
        words, distance = vq(des_list[i][1], voc)
        for w in words:
            im_features[i][w] += 1
    # Scaling the words
    stdSlr = StandardScaler().fit(im_features)
    im_features = stdSlr.transform(im_features)

    # Save the scaler, vocabulary size and vocabulary (no SVM is trained in this function)
    joblib.dump((stdSlr, k, voc), "bof.pkl", compress=3)
    image_classes = np.reshape(image_classes, (-1, 1))
    im_features = np.append(im_features, HH, axis=1)
    res = np.append(im_features, image_classes, axis=1)
    # res = np.append(image_names, res, axis = 1)
    fl = open('FeatureSample.csv', 'w')

    writer = csv.writer(fl)
    for values in res:
        writer.writerow(values)

    fl.close()
    return im_features, image_classes
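A note on the SciPy calls used throughout these examples: scipy.cluster.vq.kmeans returns a (codebook, distortion) pair, and vq maps each observation to the index of its nearest codebook row. A tiny self-contained sketch of those semantics:

import numpy as np
from scipy.cluster.vq import kmeans, vq

# Two well-separated blobs; kmeans should find one centre per blob.
obs = np.vstack([np.random.rand(20, 2), np.random.rand(20, 2) + 5])
codebook, distortion = kmeans(obs, 2)   # 2 cluster centres ("visual words")
codes, dists = vq(obs, codebook)        # nearest-centre index per row
print(codes)                            # e.g. [0 0 ... 1 1]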
Example #11
    log.info("Getting training classes names and store them in a list")
    try:
        training_names = os.listdir(args.trainingSetPath)
    except OSError:
        log.error("No such directory {}. Check if the directory exists".format(
            args.trainingSetPath))
        exit()

    # Get all paths to the images into image_paths, with the corresponding labels in image_classes
    log.info("Getting all paths to the images and saving them in a list")
    image_paths = []
    image_classes = []
    class_id = 0
    for training_name in training_names:
        directory_name = os.path.join(args.trainingSetPath, training_name)
        class_path = imutils.imlist(directory_name)
        image_paths += class_path
        image_classes += [class_id] * len(class_path)
        class_id += 1

    # Get the number of CPUs
    cpus = os.cpu_count()

    # Take the set size
    set_size = len(image_paths)

    # Calculate the size of each subset so there is one subset per cpu
    subset_size = int(numpy.ceil(set_size / cpus))

    # Divide the set into subsets according to the quantity of cpus
    log.info("Dividing feature detection and extraction between {} processes".
Example #12
image_paths = []
image_classes = []
class_temp = []
class_id = 0
if args["testingSet"]:
    test_path = args["testingSet"]
    try:
        testing_names = os.listdir(test_path)
    except OSError:
        print "No such directory {}\nCheck if the file exists".format(
            test_path)
        exit()
    for testing_name in testing_names:
        class_temp.append(testing_name)
        dir = os.path.join(test_path, testing_name)
        class_path = imutils.imlist(dir)
        image_classes += [class_id] * len(class_path)
        image_paths += class_path
        class_id += 1
else:
    image_paths = [args["image"]]

class_id_temp = 0
for i in range(len(image_classes)):
    if i != len(image_classes) - 1:
        if image_classes[i] == image_classes[i + 1]:
            image_classes[i] = class_temp[class_id_temp]
        else:
            image_classes[i] = class_temp[class_id_temp]
            class_id_temp += 1
    else:
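The loop above replaces each numeric class id with its directory name (the snippet is cut off before the final else branch). Since the ids were assigned in the order the class names were appended to class_temp, the same mapping can be written in one line; a sketch:

# Equivalent id -> name mapping (a sketch; class_temp[i] is the name of class i).
image_classes = [class_temp[class_id] for class_id in image_classes]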
Example #13
import os
import imutils
from PIL import Image

dataset_path = "./dataset/good/"
image_list = imutils.imlist(dataset_path)

# Use a context manager so the file is closed when the loop finishes
with open('./cars.info', 'w') as f:
    for image in image_list:
        im = Image.open(image)
        (width, height) = im.size
        f.write(image + ' 1 0 0 ' + str(width) + ' ' + str(height) + '\n')
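Each line written here follows the positive-sample description format used by OpenCV's opencv_createsamples tool: image path, object count, then one x y w h box per object (here a single box covering the whole image). A hedged sketch that reads the file back and parses that layout:

# Hypothetical sanity check for the annotation file written above (a sketch).
with open('./cars.info') as f:
    for line in f:
        parts = line.split()
        path, count = parts[0], int(parts[1])
        boxes = [tuple(map(int, parts[2 + 4 * i:6 + 4 * i])) for i in range(count)]
        print(path, boxes)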