Example #1
def main_mat_construct(image_paths_m):
    print('Iterating through features...')
    for image_path in image_paths_m:
        #print image_path
        im = cv2.imread(image_path)
        im = imutilspy.resize(im, height = 200)
        
        #kpts = fea_det.detect(im)
        kpts = orb.detect(im,None)
        
        #kpts, des = des_ext.compute(im, kpts)
        kp, des = orb.compute(im, kpts)
        
        des_list.append((image_path, des))
        
        
    # Stack all the descriptors vertically in a numpy array
    print('Converting data to main matrix...')
    descriptors = des_list[0][1]
    for image_path, descriptor in des_list[1:]:
        descriptors = np.vstack((descriptors, descriptor))  
    # Perform k-means clustering
    k = 100
    voc, variance = kmeans(descriptors, k, 1) 

    # Calculate the histogram of features
    print('Calculating histogram of features...')
    im_features = np.zeros((len(image_paths_m), k), "float32")
    for i in range(len(image_paths_m)):
        words, distance = vq(des_list[i][1],voc)
        for w in words:
            im_features[i][w] += 1

    # Perform Tf-Idf vectorization
    nbr_occurences = np.sum((im_features > 0) * 1, axis=0)
    # note: idf is computed here but never applied to im_features in this snippet
    idf = np.array(np.log((1.0 * len(image_paths_m) + 1) / (1.0 * nbr_occurences + 1)), 'float32')

    # Scaling the words
    print('Scaling words...')
    stdSlr = StandardScaler().fit(im_features)
    im_features = stdSlr.transform(im_features)

    return im_features
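
For context, a minimal sketch of the setup this function appears to rely on: the module-level orb detector, the des_list accumulator, the imutilspy resize helper used throughout these examples, and the SciPy/scikit-learn imports. Only the names that appear in the snippet are taken from it; everything else below is an assumption, not part of the original example.

# Assumed setup for main_mat_construct (sketch, not from the original listing):
import cv2
import numpy as np
from scipy.cluster.vq import kmeans, vq
from sklearn.preprocessing import StandardScaler
import imutilspy  # the author's resize helper used inside the function (not shown here)

orb = cv2.ORB_create()   # cv2.ORB() on OpenCV 2.x
des_list = []            # module-level list of (image_path, descriptors) pairs

# hypothetical call with placeholder paths:
# im_features = main_mat_construct(['dataset/data/water_bottle/1.jpg',
#                                   'dataset/data/water_bottle/2.jpg'])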
Example #2
#path = 'images/beach_trash_13.jpg'

visual = True
#visual = False
 
#roi = cv2.imread('images/soccer-player-2.jpg')
#roi = cv2.imread('images/object_group_2.jpg')
roi = cv2.imread(path)

hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)
 
#target = cv2.imread('images/soccer-player-2.jpg')
#target = cv2.imread('images/surgeon_2.jpg')
#target = cv2.imread('images/object_group_2.jpg')
target = cv2.imread(path)
target = imutilspy.resize(target, height = 1000)

hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)

img_height = target.shape[0]
img_width = target.shape[1]

# calculating object histogram over the H and S channels
# (in OpenCV, hue spans 0-180 and saturation 0-256; these ranges, not the image
#  dimensions, belong in the calcHist/calcBackProject range arguments)
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

# normalize histogram and apply backprojection
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
 
# Now convolute with circular disc
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
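
Example #2 stops right after building the disc kernel. The usual continuation of this backprojection recipe (compare the filter2D/threshold tail in Example #4 below) smooths dst with the disc, thresholds it, and masks the target; a sketch under that assumption:

# Assumed continuation of the backprojection pipeline (not in the original snippet):
cv2.filter2D(dst, -1, disc, dst)               # convolve the backprojection with the disc kernel
ret, thresh = cv2.threshold(dst, 50, 255, 0)   # binarize the probability map
thresh3 = cv2.merge((thresh, thresh, thresh))  # 3-channel mask for bitwise AND with the BGR target
res = cv2.bitwise_and(target, thresh3)

if visual:
    cv2.imshow('backprojection', res)
    cv2.waitKey(0)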
Example #3
    image_paths_m += class_path
    class_id += 1


# List where all the descriptors are stored


print "Iterating through features"


m = 0
dest_path = "dataset/4x4_data/water-bottle/"
for image_path in image_paths_m:
    print(image_path)
    im = cv2.imread(image_path)
    im = imutilspy.resize(im, height=200)

    img_height = im.shape[0]
    img_width = im.shape[1]

    # split the image into four quadrants (integer division keeps the indices valid in Python 3)
    uple = im[0 : img_height // 2, 0 : img_width // 2]
    dole = im[img_height // 2 : img_height, 0 : img_width // 2]
    upri = im[0 : img_height // 2, img_width // 2 : img_width]
    dori = im[img_height // 2 : img_height, img_width // 2 : img_width]

    uple_st = dest_path + str(m) + "_0.png"
    dole_st = dest_path + str(m) + "_1.png"
    upri_st = dest_path + str(m) + "_2.png"
    dori_st = dest_path + str(m) + "_3.png"

    cv2.imwrite(uple_st, uple)
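
The listing cuts off after the first cv2.imwrite. Presumably the other three quadrants are written and the filename counter advanced; a sketch of that assumed loop tail:

# Assumed remainder of the loop body (not shown in the listing):
    cv2.imwrite(dole_st, dole)
    cv2.imwrite(upri_st, upri)
    cv2.imwrite(dori_st, dori)
    m += 1   # next source image gets a new filename prefix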
Example #4
    cv2.filter2D(dst,-1,disc,dst)
     
    # threshold and binary AND
    ret,thresh = cv2.threshold(dst,50,255,0)
    return thresh

#img = cv2.imread('dataset/data/water_bottle/2.jpg')
#img = cv2.imread('dataset/data/water_bottle/21.jpg')
#img = cv2.imread('dataset/data_two_choice/test/paper/1.jpg')
#img = cv2.imread('dataset/data_two/paper/IMG_0205.JPG',0)
#img = cv2.imread('dataset/data_two/water_bottle/IMG_0187.JPG',0)
#img = cv2.imread('dataset/data_two/water_bottle/IMG_0155.JPG')
#img = cv2.imread('dataset/data_two/water_bottle/IMG_0171.JPG')
img = cv2.imread('dataset/data_two/water_bottle/IMG_0163.JPG')

img = imutilspy.resize(img, height = 400)



img = histBackProj(img)
img = invert_img(img)



cv2.imshow('test', img)
cv2.waitKey(0)
'''
img = cv2.medianBlur(img,5)
ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)

'''
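
Neither histBackProj nor invert_img is defined in this example (only the tail of histBackProj appears at the top). A plausible stand-in for invert_img, assuming it simply flips the binary mask, might look like:

# Hypothetical helper (assumption; the real invert_img is not in the listing):
def invert_img(img):
    return cv2.bitwise_not(img)   # invert the thresholded mask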
Example #5
    image_classes_m+=[class_id]*len(class_path)
    class_id+=1
        
# note: sklearn.cross_validation has since been replaced by sklearn.model_selection
image_paths_tr, image_paths_te, image_classes_tr, image_classes_te = cross_validation.train_test_split(
    image_paths_m, image_classes_m, test_size=0.2)

    
if option == 'b':

    # List where all the descriptors are stored
    des_list = []
    sk_count = 0
    count = 0
    print('Iterating through features')
    for image_path in image_paths_tr:
        im = cv2.imread(image_path)
        im = imutilspy.resize(im, height = 200)

        #im = grabCut(im)
        
        #kpts = fea_det.detect(im)
        kpts = orb.detect(im,None)
        
        #kpts, des = des_ext.compute(im, kpts)
        kp, des = orb.compute(im, kpts)
        
        
        if des is None:
            # ORB found no usable keypoints for this image; drop it from the dataset
            print(image_path)
            os.remove(image_path)
            sk_count += 1
        else:
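            # assumed branch body, not shown in this truncated listing; based on how
            # des_list is filled in Example #1, valid descriptors are appended here
            des_list.append((image_path, des))
            count += 1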
Example #6
for training_name in training_names:
    class_dir = os.path.join(train_path, training_name)
    class_path = imutils.imlist(class_dir)
    image_paths_m += class_path
    class_id += 1

# List where all the descriptors are stored

print('Iterating through features')

m = 0
dest_path = 'dataset/4x4_data/water-bottle/'
for image_path in image_paths_m:
    print(image_path)
    im = cv2.imread(image_path)
    im = imutilspy.resize(im, height=200)

    img_height = im.shape[0]
    img_width = im.shape[1]

    # split the image into four quadrants (integer division keeps the indices valid in Python 3)
    uple = im[0:img_height // 2, 0:img_width // 2]
    dole = im[img_height // 2:img_height, 0:img_width // 2]
    upri = im[0:img_height // 2, img_width // 2:img_width]
    dori = im[img_height // 2:img_height, img_width // 2:img_width]

    uple_st = dest_path + str(m) + '_0.png'
    dole_st = dest_path + str(m) + '_1.png'
    upri_st = dest_path + str(m) + '_2.png'
    dori_st = dest_path + str(m) + '_3.png'

    cv2.imwrite(uple_st, uple)
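
For completeness, a sketch of the preamble this loop assumes: the directory-listing helper, the resize helper, and the accumulators initialized before iterating. The names mirror the snippet; the training root path is a placeholder, and anything not shown in the listing is an assumption.

# Assumed preamble for the loop above (sketch, not from the original listing):
import os
import cv2
import imutils    # local helper that provides imlist(); not necessarily the PyPI imutils package
import imutilspy  # the author's resize helper used throughout these examples

train_path = 'dataset/data/'              # hypothetical training root
training_names = os.listdir(train_path)   # one subfolder per class

image_paths_m = []   # flat list of image paths across all classes
class_id = 0         # numeric label assigned per class folder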