from skimage.feature import CENSURE


def keypoints_censure(img):
    """Detect key points using CENSURE."""
    censure = CENSURE(mode='STAR')
    censure.detect(img)
    keypoints = censure.keypoints
    return keypoints
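# Hedged usage sketch for keypoints_censure() above: skimage's bundled
# camera() image stands in for a real input; nothing here is from the
# original snippet.
from skimage import data

sample = data.camera()            # 2D grayscale test image
kp = keypoints_censure(sample)    # (N, 2) array of (row, col) coordinates
print(kp.shape)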
def selectFeatures(useList):
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    for c in range(len(classes)):
        className = classes[c]
        path = trainPaths[c]
        detector = CENSURE()
        detector2 = ORB(n_keypoints=50)
        detector3 = BRIEF(patch_size=49)
        files = os.listdir(path)
        # sample 100 random files per class
        files = random.sample(files, 100)
        nfiles = len(files)
        for i in range(nfiles):
            featureVector = []
            infile = files[i]
            img = io.imread(path + infile, as_grey=True)
            hist = np.histogram(img, bins=256)
            img = resize(img, (400, 400))
            detector2.detect_and_extract(img)
            detector.detect(img)
            a = fd = hog(img, orientations=9, pixels_per_cell=(32, 32),
                         cells_per_block=(1, 1), visualise=False)
            for h in hist:
                fd = np.append(fd, h)
            if useList[0]:
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if useList[1]:
                fd = np.append(fd, detector2.keypoints)
            if useList[2]:
                fd = np.append(fd, edgeExtract(img, 100))
            l1 = len(fd)
            corners = corner_peaks(corner_harris(img), min_distance=1)
            if useList[3]:
                fd = np.append(fd, corners)
            lengthV.append(len(fd))
            DataSet.append(fd)
            ind = classes.index(className)
            LabelSet.append(ind)
    # pad all feature vectors with zeros to the length of the longest one
    max_len = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    for d in DataSet:
        d = np.pad(d, (0, max_len - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    res = 0
    # perform grid search with one thread
    if __name__ == '__main__':
        res = gridSearch(DataSet, LabelSet, False)
    return res
def get_CENSURE_kp(img):
    img = rgb2gray(img)
    censure = CENSURE(mode="star")
    censure.detect(img)
    kp = censure.keypoints
    return kp
def censure(img):
    img = color.rgb2gray(img)
    # tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
    #                            translation=(150, -200))
    # img_warp = tf.warp(img, tform)
    detector = CENSURE()
    detector.detect(img)
    # return detector.keypoints, detector.scales
    return detector.scales
def test_keypoints_censure_moon_image_dob():
    """Verify the actual CENSURE keypoints and their corresponding scales
    against the expected values for the DoB filter."""
    detector = CENSURE()
    detector.detect(img)
    expected_keypoints = np.array([[21, 497], [36, 46], [119, 350],
                                   [185, 177], [287, 250], [357, 239],
                                   [463, 116], [464, 132], [467, 260]])
    expected_scales = np.array([3, 4, 4, 2, 2, 3, 2, 2, 2])
    assert_array_equal(expected_keypoints, detector.keypoints)
    assert_array_equal(expected_scales, detector.scales)
def test_keypoints_censure_moon_image_octagon():
    """Verify the actual CENSURE keypoints and their corresponding scales
    against the expected values for the Octagon filter."""
    detector = CENSURE(mode="octagon")
    detector.detect(img)
    expected_keypoints = np.array([[21, 496], [35, 46], [287, 250],
                                   [356, 239], [463, 116]])
    expected_scales = np.array([3, 4, 2, 2, 2])
    assert_array_equal(expected_keypoints, detector.keypoints)
    assert_array_equal(expected_scales, detector.scales)
def test_keypoints_censure_moon_image_star():
    """Verify the actual CENSURE keypoints and their corresponding scales
    against the expected values for the STAR filter."""
    detector = CENSURE(mode='star')
    detector.detect(rescale(img, 0.25))  # quarter-scale image for speed
    expected_keypoints = np.array([[23, 27], [29, 89], [30, 86], [107, 59],
                                   [109, 64], [111, 67], [113, 70]])
    expected_scales = np.array([3, 2, 4, 2, 5, 3, 2])
    assert_array_equal(expected_keypoints, detector.keypoints)
    assert_array_equal(expected_scales, detector.scales)
def test_keypoints_censure_moon_image_octagon():
    """Verify the actual CENSURE keypoints and their corresponding scales
    against the expected values for the Octagon filter."""
    detector = CENSURE(mode='octagon')
    detector.detect(rescale(img, 0.25))  # quarter-scale image for speed
    expected_keypoints = np.array([[23, 27], [29, 89], [31, 87],
                                   [106, 59], [111, 67]])
    expected_scales = np.array([3, 2, 5, 2, 4])
    assert_array_equal(expected_keypoints, detector.keypoints)
    assert_array_equal(expected_scales, detector.scales)
def test_keypoints_censure_moon_image_star():
    """Verify the actual CENSURE keypoints and their corresponding scales
    against the expected values for the STAR filter."""
    detector = CENSURE(mode='star')
    detector.detect(img)
    expected_keypoints = np.array([[21, 497], [36, 46], [117, 356],
                                   [185, 177], [260, 227], [287, 250],
                                   [357, 239], [451, 281], [463, 116],
                                   [467, 260]])
    expected_scales = np.array([3, 3, 6, 2, 3, 2, 3, 5, 2, 2])
    assert_array_equal(expected_keypoints, detector.keypoints)
    assert_array_equal(expected_scales, detector.scales)
def calc_CENSURE_score(dose_dataset):
    dose_score = 0
    frame_no = len(dose_dataset.pixel_array)
    frames = dose_dataset.pixel_array.copy()
    # pseudo_gray_frames = np.interp(frames, (frames.min(), frames.max()), (0, 255))
    for i in range(frame_no):
        # scale each frame into a 0-255 pseudo-grayscale range
        im = frames[i] * 255 / frames.max()
        # im *= 255 / frames.max()
        censure = CENSURE()
        censure.detect(im)
        key_points = censure.keypoints
        dose_score += len(key_points)
    normal_dose = dose_score / frame_no
    return normal_dose
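# Hedged usage sketch for calc_CENSURE_score(): assumes pydicom and a
# multi-frame dose file whose pixel_array holds the frames; the file name
# is hypothetical.
import pydicom

ds = pydicom.dcmread('rtdose.dcm')
print(calc_CENSURE_score(ds))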
def motionVectors(self, detectedObjects, currentFrame, nextFrame):
    censure = CENSURE()
    keypoints = np.array([]).reshape(-1, 2)
    nkps = {}
    arrowDict = {}
    for num, region in enumerate(detectedObjects):
        x0, y0, w, h = region
        roi = rgb2gray(currentFrame[int(y0 - 5):int(y0 + h + 5),
                                    int(x0 - 5):int(x0 + w + 5)])
        try:
            censure.detect(roi)
            kps = censure.keypoints
            # shift ROI-local keypoints back into full-frame coordinates
            kps[:, 1] += int(x0)
            kps[:, 0] += int(y0)
            # kps = np.c_[kps, num * np.ones(kps.shape[0])]
            nkps[num] = kps.shape[0]
            keypoints = np.append(keypoints, kps, axis=0)
        except Exception:
            print('Skipped ROI')
            return nextFrame, arrowDict
    # print(keypoints.shape)
    try:
        flow_vectors = pyramid_lucas_kanade(rgb2gray(currentFrame),
                                            rgb2gray(nextFrame),
                                            keypoints, window_size=9)
    except Exception:
        return nextFrame, arrowDict
    counter = 0
    aggregate_vectors = np.hstack((keypoints, flow_vectors))
    for k in nkps.keys():
        if nkps[k] != 0:
            vec = np.sum(aggregate_vectors[counter:counter + nkps[k], :], axis=0)
            avgY = vec[0] / nkps[k]
            avgX = vec[1] / nkps[k]
            p1 = (int(avgX), int(avgY))
            p2 = (int(avgX + vec[3]), int(avgY + vec[2]))
            arrowDict[k] = (p1, (int(vec[3]), int(vec[2])))
            cv2.arrowedLine(nextFrame, p1, p2, (225, 32, 33), 3)
            counter += nkps[k]
    return nextFrame, arrowDict
def extract_by_brief(img1, img2, min_distance=1):
    extractor = BRIEF()
    detector = CENSURE(mode='STAR')
    detector.detect(img1)
    keyp1 = detector.keypoints
    detector.detect(img2)
    keyp2 = detector.keypoints
    extractor.extract(img1, keyp1)
    desc1 = extractor.descriptors
    # keep only the keypoints BRIEF retained, so rows of desc1 line up
    keyp1 = keyp1[extractor.mask]
    # the original extracted from img1 here again; img2 is the intended image
    extractor.extract(img2, keyp2)
    desc2 = extractor.descriptors
    keyp2 = keyp2[extractor.mask]
    return [keyp1, keyp2, desc1, desc2]
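# Hedged follow-up sketch: matching the BRIEF descriptors returned by
# extract_by_brief() with skimage's match_descriptors. The second image is
# just a rolled copy of the first, a crude stand-in for a second view.
import numpy as np
from skimage import data
from skimage.feature import match_descriptors

img1 = data.camera()
img2 = np.roll(img1, 8, axis=1)
keyp1, keyp2, desc1, desc2 = extract_by_brief(img1, img2)
matches = match_descriptors(desc1, desc2, cross_check=True)
print(matches.shape)  # (M, 2) index pairs into keyp1/keyp2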
def getFeatures(slice, mels=256):
    '''
    Helper function to get the features of a song. It first computes the
    spectrogram, then runs the CENSURE image algorithm on it to return the
    feature scatterplot.
    :param slice: (Slice) | slice to find features for
    :param mels: (int) | 'resolution' of the spectrogram
    :return: detector: (CENSURE) | CENSURE image detector
    :return: kp: (numpy.ndarray) | feature scatterplot
    '''
    y, sr = tuple(slice)
    # y = effects.percussive(y)
    S = feature.melspectrogram(y, sr=sr, n_mels=mels)
    # note: logamplitude is the pre-0.6 librosa API (renamed power_to_db later)
    log_S = logamplitude(S, ref_power=np.max)
    detector = CENSURE()
    detector.detect(log_S)
    kp = detector.keypoints
    return detector, kp
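# Hedged usage sketch for getFeatures(): loads audio with librosa and passes
# a (y, sr) tuple as the slice; the file name is hypothetical.
import librosa

y, sr = librosa.load('song.wav')
detector, kp = getFeatures((y, sr))
print(len(kp), 'spectrogram keypoints')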
def feature_detectors(img_grey):
    """
    Extracts features from raw 3D tensor / grayscaled / filtered images.
    """
    # feature detectors:
    censure_detector = CENSURE(mode='Octagon')
    censure_detector.detect(img_grey)
    censure_keypoints = censure_detector.keypoints
    censure_vec = np.ravel(censure_keypoints)
    orb_descriptor_extractor = ORB(n_keypoints=200)
    orb_descriptor_extractor.detect_and_extract(img_grey)
    orb_keypoints = np.ravel(orb_descriptor_extractor.keypoints)
    orb_descriptors = np.ravel(orb_descriptor_extractor.descriptors)
    orb_vec = np.concatenate((orb_keypoints, orb_descriptors), axis=0)
    feat_det_vec = np.concatenate((censure_vec, orb_vec), axis=0)
    return feat_det_vec
def warp_censure(image, map_args={}, output_shape=None, order=1,
                 mode='constant', cval=0.0, clip=True, preserve_range=False):
    # img_orig = imageGlobal
    tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                               translation=(150, -200))
    img_warp = tf.warp(image, tform)
    detector = CENSURE()
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
    detector.detect(image)
    ax[0].imshow(image, cmap=plt.cm.gray)
    ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
                  2 ** detector.scales, facecolors='none', edgecolors='r')
    ax[0].set_title("Original Image")
    detector.detect(img_warp)
    ax[1].imshow(img_warp, cmap=plt.cm.gray)
    ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
                  2 ** detector.scales, facecolors='none', edgecolors='r')
    ax[1].set_title('Transformed Image')
    for a in ax:
        a.axis('off')
    plt.tight_layout()
    plt.show()
def CENSURETransform(img1, img2, fp1, fp2):
    bw_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY).astype("double")
    bw_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY).astype("double")
    cen = CENSURE()
    cen.detect(bw_img1)
    kp1 = cen.keypoints
    cen.detect(bw_img2)
    kp2 = cen.keypoints
    kp1 = np.append(kp1, fp1, axis=0)
    kp2 = np.append(kp2, fp2, axis=0)
    matches = match_descriptors(kp1, kp2, cross_check=True)
    # fig, ax = plt.subplots(nrows=1, ncols=1)
    # plot_matches(ax, img1, img2, kp1, kp2, matches)
    # plt.show()
    points1 = kp1[matches[:, 0]]
    points2 = kp2[matches[:, 1]]
    M, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    if M is None:
        return
    dst = cv2.warpPerspective(img1, M, (img1.shape[1], img1.shape[0]))
    return dst
class CENSUREClassifier(LocalFeatures):
    """
    CenSurE feature detector [2]; SURF [4, 5] as feature descriptor.
    CENSURE implementation from skimage.
    """

    def __init__(self, testDataObj, svmType=None, name="", description=""):
        super(CENSUREClassifier, self).__init__(
            Settings.C_TESTDATA_SEGMENTS, svmType, Settings.C_SVM_PARAMS,
            testDataObj, name, True, description)
        self.detector = None
        self.bow = BagOfWords(Settings.C_BOW_DIMENSION, "SURF")
        self.descriptor = cv.SURF()
        self.detector = CENSURE(non_max_threshold=0.01)

    def detect_keypoints(self, image):
        self.detector.detect(image)
        keypointList = self.detector.keypoints
        # convert coordinate keypoints to OpenCV keypoints; regarding the
        # class_id and size values see
        # http://stackoverflow.com/questions/17981126/what-is-the-meaning-and-use-of-class-member-class-id-of-class-cvkeypoint-in-op
        # and http://stackoverflow.com/questions/34104297/how-to-convert-given-coordinates-to-kaze-keypoints-in-python-with-opencv
        return [cv.KeyPoint(p[0], p[1], 5, _class_id=0) for p in keypointList]

    def descibe_keypoints(self, image, keypoints):
        return self.descriptor.compute(image, keypoints)

    def __getstate__(self):
        result = self.__dict__.copy()
        del result['detector']
        return result

    def __setstate__(self, dict):
        self.__dict__ = dict
        # restore the CENSURE detector dropped by __getstate__ (the original
        # recreated a cv.SURF() here, apparently a copy-paste slip)
        self.detector = CENSURE(non_max_threshold=0.01)

    def __str__(self):
        return "CENSURE"
def censure_feature(im):
    """
    CENSURE feature extraction.
    :param im: image
    :return: image keypoints
    """
    # convert RGB to grayscale if needed
    if im.ndim == 3:
        image = rgb2gray(im)
    else:
        image = np.atleast_2d(im)
    censure = CENSURE()
    censure.detect(image)
    # censure.scales
    # fetch only the first 10 keypoint coordinates
    censure_array = np.zeros(10)
    flat = censure.keypoints.ravel()
    if len(flat) > 10:
        censure_array = flat[:10]
    else:
        censure_array[:len(flat)] = flat
    return censure_array
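# Hedged quick check of censure_feature()'s fixed-length output, using a
# bundled sample image as a stand-in input.
from skimage import data

vec = censure_feature(data.camera())
assert vec.shape == (10,)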
def censure_features(img_filepath):
    img_orig = cv2.imread(img_filepath, 0)
    tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                               translation=(150, -200))
    img_warp = tf.warp(img_orig, tform)
    detector = CENSURE()
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
    ax1.imshow(img_orig, cmap=plt.cm.gray)
    ax1.set_title("Original Image")
    detector.detect(img_orig)
    ax2.imshow(img_orig, cmap=plt.cm.gray)
    ax2.scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
                2 ** detector.scales, facecolors='none', edgecolors='r')
    ax2.set_title("Censure features")
    plt.show()
# - And - of course - I'm yet to find and quantify meaningful and generic features.
#
# And a funny thought - what if I cut each sample in half along the symmetry
# axis and treat the halves as two samples? How shameless would that be? I'd
# double the sample size, generalize better, and simplify and reduce the
# features I work with...
#
# ![Evil genius laughter][1]
#
# [1]: http://www.jimbowley.com/wp-content/uploads/2011/06/DrEvilPinky.jpg

# In[ ]:

# check a few scikit-image feature extractions, to see if they can help us
from skimage.feature import corner_harris, corner_subpix, corner_peaks, CENSURE

detector = CENSURE()
detector.detect(img)
coords = corner_peaks(corner_harris(img), min_distance=5)
coords_subpix = corner_subpix(img, coords, window_size=13)

plt.subplot(121)
plt.title('CENSURE feature detection')
plt.imshow(img, cmap='Set3')
plt.scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
            2 ** detector.scales, facecolors='none', edgecolors='r')

plt.subplot(122)
plt.title('Harris Corner Detection')
plt.imshow(img, cmap='Set3')  # show me the leaf
plt.plot(coords[:, 1], coords[:, 0], '.b', markersize=5)
print("The folder name provided wasn't: sample, train, or test; using sample folder.") folder1 = "sample" except: print("Didn't give me a folder; using sample folder.") folder1 = "sample" file_names = os.listdir("/home/dick/Documents/Kaggle/" + folder1) N = len(file_names) print("Progress: 0 %"), for i in range(N): img = cv2.imread(folder1 + "/" + file_names[i],1) gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) censure = CENSURE() censure.detect(gray) kp = censure.keypoints scales = censure.scales print(len(kp)) plt.imshow(img) plt.axis('off') plt.scatter(censure.keypoints[:, 1], censure.keypoints[:, 0], 2 ** censure.scales, facecolors='none', edgecolors='r') plt.show() #Store and Retrieve keypoint features temp_array = [] temp = pickle_keypoints(kp, scales)
class LogoFinder:
    def __init__(self):
        self.heatmapper = Heatmapper(
            point_diameter=15,
            point_strength=0.05,
            opacity=3,
            colours='reveal',
        )
        self.heatmap_name = 'heatmap.png'
        self.detector = CENSURE()

    @staticmethod
    def read_image(image_path):
        image = cv2.imread(image_path)
        bgr = image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return bgr, rgb, gray

    def make_heatmap(self, image_path):
        bgr_heatmap, rgb_heatmap, gray_heatmap = self.read_image(image_path)
        # find contours and edges
        contours = find_contours(gray_heatmap, 100, fully_connected='high',
                                 positive_orientation='high')
        edge_roberts = roberts(gray_heatmap)
        edge_sobel = sobel(gray_heatmap)
        self.detector.detect(gray_heatmap)
        thresh = threshold_otsu(gray_heatmap)
        bw = closing(gray_heatmap > thresh, square(3))
        cleared = clear_border(bw)
        label_image = label(cleared)
        image_label_overlay = label2rgb(label_image, image=gray_heatmap)
        edges = edge_roberts / 2 + edge_sobel / 2
        dots = list()
        for x in range(edges.shape[1]):
            for y in range(edges.shape[0]):
                for i in range(int(edges[y][x] * 2)):
                    dots.append([x, y])
        for n, point in enumerate(self.detector.keypoints):
            for i in range(self.detector.scales[n] * 10):
                dots.append([
                    point[1] + (random.random() - 0.5) * 10,
                    point[0] + (random.random() - 0.5) * 10
                ])
        for n, contour in enumerate(contours):
            if len(contour) > 50:
                for dot in contour:
                    dots.append([dot[1], dot[0]])
        for region in regionprops(label_image):
            # take regions with large enough areas
            if region.area >= 100:
                # fill the bounding box of each segmented region with dots
                minr, minc, maxr, maxc = region.bbox
                for x in range(minc, maxc):
                    for y in range(minr, maxr, 8):
                        dots.append([x, y])
        heatmap = self.heatmapper.heatmap_on_img_path(dots, image_path)
        heatmap.save(self.heatmap_name)
        bgr_heatmap, rgb_heatmap, gray_heatmap = self.read_image(
            self.heatmap_name)
        thresh = threshold_otsu(gray_heatmap)
        bw = closing(gray_heatmap > thresh, square(3))
        cleared = clear_border(bw)
        label_image = label(cleared)
        image_label_overlay = label2rgb(label_image, image=gray_heatmap)
        for n, region in enumerate(regionprops(label_image)):
            minr, minc, maxr, maxc = region.bbox
            if maxr - minr > 30 and maxc - minc > 30:
                yield minr, minc, maxr, maxc
gray1 = gray_.copy()

# read the values of the slider bars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
if min_scale == 0:
    min_scale = 1
if min_scale + max_scale < 3:
    max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure')) / 1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')

# create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
                  non_max_threshold=non_max_threshold,
                  line_threshold=line_threshold)

# obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales

# print the number of features if it has changed between iterations
num_kp[0] = len(kp_blue)
num_kp[1] = len(kp_green)
num_kp[2] = len(kp_red)
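# For reference, a hedged sketch of CENSURE's full constructor with skimage's
# documented defaults, which the trackbars above override at runtime:
censure_defaults = CENSURE(min_scale=1, max_scale=7, mode='DoB',
                           non_max_threshold=0.15, line_threshold=10)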
edge_sobel.shape

# In[15]:

# CENSURE feature detector
import skimage.transform as tf
from skimage.feature import CENSURE

tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                           translation=(0, -100))
# tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img1 = color.rgb2gray(resize(image, (250, 250)))
img2 = tf.warp(img1, tform)
detector = CENSURE(mode='Octagon')
fig, ax = plt.subplots(nrows=1, ncols=2)
plt.gray()
detector.detect(image_grey)
ax[0].imshow(image_grey)
ax[0].axis('off')
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
def main():
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    resList = []
    boolList = []
    pos = 0
    ind = 0
    # if you wish to automatically perform both feature-selection optimization
    # and SVM optimization at the same time, comment out the next line and
    # comment in the section above
    # Warning: very long runtime because of the grid search
    useList = [True, True, True, True]
    # print(useList)
    for c in range(len(classes)):
        # get label for features to be added
        className = classes[c]
        # get file path for the folder with images
        path = trainPaths[c]
        # initialize feature detectors/extractors
        # CENSURE extractor
        detector = CENSURE()
        # ORB extractor
        detector2 = ORB(n_keypoints=50)
        # get all file names from the folder
        files = os.listdir(path)
        nfiles = len(files)
        # repeat for each file
        for i in range(nfiles):
            # initialize feature vector as empty list
            featureVector = []
            infile = files[i]
            # read image as grayscale numpy.ndarray
            img = io.imread(path + infile, as_grey=True)
            # get histogram of grayscale intensity values
            hist = np.histogram(img, bins=256)
            # resize image
            img = resize(img, (400, 400))
            # extract features but do not yet add them to the feature vector
            detector2.detect_and_extract(img)
            # extract HOG features and start the feature vector with them
            a = fd = hog(img, orientations=9, pixels_per_cell=(32, 32),
                         cells_per_block=(1, 1), visualise=False)
            # add histogram to the feature vector
            for h in hist:
                fd = np.append(fd, h)
            # if the corresponding boolean in useList is true, add the
            # features to the feature vector --> feature selection happens here
            if useList[0]:
                detector.detect(img)
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if useList[1]:
                fd = np.append(fd, detector2.keypoints)
            if useList[2]:
                fd = np.append(fd, edgeExtract(img, 100))
            if useList[3]:
                corners = corner_peaks(corner_harris(img), min_distance=1)
                fd = np.append(fd, corners)
            # record the feature vector's length for later padding
            lengthV.append(len(fd))
            # add the feature vector to the dataset that is fed into the SVM
            DataSet.append(fd)
            # get label index
            ind = classes.index(className)
            # add label to the label set that is fed into the SVM
            LabelSet.append(ind)
    # get the length of the longest feature vector
    max_len = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    # pad the dataset with zeros so all feature vectors have the same
    # length --> important for the SVM
    for d in DataSet:
        d = np.pad(d, (0, max_len - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    # perform a grid search with the maximum number of threads (usually 4)
    if __name__ == '__main__':
        gridSearch(DataSet, LabelSet)
    # train and evaluate an SVM with default values for later comparison
    clf = svm.SVC(kernel='rbf', C=10.0, gamma=1.0000000000000001e-09)
    clf.fit(DataSet, LabelSet)
    joblib.dump(clf, classes[0] + ' ' + classes[1] + '.pk1')
    scores = cross_val_score(clf, DataSet, LabelSet, cv=10)
    # print results of the default SVM
    print(scores)
    print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
implementation.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

img_orig = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                           translation=(150, -200))
img_warp = tf.warp(img_orig, tform)

detector = CENSURE()

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
plt.tight_layout()

detector.detect(img_orig)

ax[0].imshow(img_orig, cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title("Original Image")

detector.detect(img_warp)

ax[1].imshow(img_warp, cmap=plt.cm.gray)
for i in range(N):
    # read image from file, then inspect the image dimensions
    img = cv2.imread(folder1 + "/" + file_names[i], 1)
    height, width, channels = img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    del img
    # make a PIL image so we can use PIL.Image.thumbnail to resize if needed
    gray_ = Image.fromarray(gray)
    # check if dimensions exceed the target; if so, resize keeping aspect ratio
    m, n = 512, 512
    if height > m or width > n:
        gray_.thumbnail((m, n), Image.ANTIALIAS)
    censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
                      non_max_threshold=non_max_threshold,
                      line_threshold=line_threshold)
    # convert the PIL image back to an ndarray before detection
    censure.detect(np.asarray(gray_))
    kp = censure.keypoints
    scales = censure.scales
    # print(len(scales))
    # store keypoint features
    temp_array = []
    temp = pickle_keypoints(kp, scales)
    temp_array.append(temp)
    pickle.dump(temp_array,
                open("features/" + folder1 + "/censure/gray/" +
                     file_names[i][:-5] + "_censure.pkl", "wb"))
    temp = str(float((i + 1) * 100 / N))
    print("Progress: " + temp + " %")
def test_keypoints_censure_mode_validity_error():
    """Mode argument in keypoints_censure can be either DoB, Octagon or STAR."""
    with pytest.raises(ValueError):
        CENSURE(mode='dummy')
def test_keypoints_censure_scale_range_error():
    """Difference between the max_scale and min_scale parameters in
    keypoints_censure should be greater than or equal to two."""
    with pytest.raises(ValueError):
        CENSURE(min_scale=1, max_scale=2)
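# For contrast, a hedged sketch of constructions the two tests above imply
# are valid: a recognized mode, and a scale span of at least two.
CENSURE(mode='DoB')
CENSURE(min_scale=1, max_scale=3)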
from skimage.feature import CENSURE
from skimage.color import rgb2gray
from skimage.io import imread
import matplotlib.pyplot as plt

img1 = rgb2gray(imread('/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/images/bottombun.0.00.27.34.-24.61.0.81.png'))
img2 = rgb2gray(imread('/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/images/bottombun.0.02.-49.74.26.68.1.83.png'))

detector = CENSURE()

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))

detector.detect(img1)

ax[0].imshow(img1, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title("Original Image")

detector.detect(img2)

ax[1].imshow(img2, cmap=plt.cm.gray)
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[1].set_title('Transformed Image')

for a in ax:
    a.axis('off')

plt.tight_layout()
def test_censure_on_rectangular_images():
    """The CENSURE feature detector should work on a 2D image of any shape."""
    rect_image = np.random.rand(300, 200)
    square_image = np.random.rand(200, 200)
    CENSURE().detect(square_image)
    CENSURE().detect(rect_image)
#!/usr/bin/env python3
# -*- Skimage Gallery -- CENSURE feature detector -*-
# CENSURE detector (scale-invariant, rotation-invariant)

from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

img_orig = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
                           translation=(150, -200))
img_warp = tf.warp(img_orig, tform)

detector = CENSURE()
fig, ax = plt.subplots(1, 2, figsize=(12, 6))

detector.detect(img_orig)
ax[0].imshow(img_orig, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title('Original Image')

detector.detect(img_warp)
ax[1].imshow(img_warp, cmap=plt.cm.gray)
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
              2 ** detector.scales, facecolors='none', edgecolors='r')

for a in ax:
    a.axis('off')

plt.tight_layout()
plt.show()
def test_keypoints_censure_color_image_unsupported_error():
    """Censure keypoints can be extracted from grayscale images only."""
    with pytest.raises(ValueError):
        CENSURE().detect(np.zeros((20, 20, 3)))