def detect(self, img_name, image, stepSize=None, windowSize=None, scale=None, minSize=None):
        windowSize = windowSize if windowSize is not None else self.windowSize
        stepSize = stepSize if stepSize is not None else self.stepSize
        scale = scale if scale is not None else self.scale
        minSize = minSize if minSize is not None else self.minSize

        window_num = 0
        polygons_metal = list()
        polygons_thatch = list()
        rects_metal = list()
        rects_thatch = list()

        #loop through pyramid

        for level, resized in enumerate(utils.pyramid(image, scale=scale, minSize=minSize)):
            for (x, y, window) in utils.sliding_window(resized, stepSize=stepSize, windowSize=windowSize):
                
                #self.debug_scaling(image, img_name, resized, x, y, level)

                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != windowSize[0] or window.shape[1] != windowSize[1]:
                    continue
                window_num += 1

                #save the correctly translated coordinates of this window
                polygon, rectangle = self.get_translated_coords(x, y, level, scale, windowSize)
                polygons_metal.append(polygon)
                rects_metal.append(rectangle)
                
                polygons_thatch.append(polygon)
                rects_thatch.append(rectangle)
        self.total_window_num += window_num
        rects = {'thatch': rects_thatch, 'metal': rects_metal}
        polygons = {'thatch': polygons_thatch, 'metal': polygons_metal}
        return polygons, rects
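
The detector above (and most of the examples below) leans on pyramid and sliding_window helpers that are not shown in the listings. As a point of reference, here is a minimal sketch of what such helpers commonly look like; this is an assumption, and the actual utils module may differ (Example 5's pyramid, for instance, also yields the scale factor):

import cv2

def pyramid(image, scale=1.5, minSize=(30, 30)):
    # yield the original image, then progressively downscaled copies until
    # the image becomes smaller than minSize (width, height)
    yield image
    while True:
        w = int(image.shape[1] / scale)
        h = int(image.shape[0] / scale)
        image = cv2.resize(image, (w, h))
        if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
            break
        yield image

def sliding_window(image, stepSize, windowSize):
    # slide a (width, height) window across the image, stepSize pixels at a
    # time, yielding the top-left corner and the cropped window
    for y in range(0, image.shape[0], stepSize):
        for x in range(0, image.shape[1], stepSize):
            yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])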
Example 2
    def detect(self, image):
        clone = image.copy()

        image = rgb2gray(image)

        # list to store the detections
        detections = []
        # current scale of the image
        downscale_power = 0

        # downscale the image and iterate
        for im_scaled in pyramid(image,
                                 downscale=self.downscale,
                                 min_size=self.window_size):
            # if the width or height of the scaled image is less than
            # the width or height of the window, then end the iterations
            if (im_scaled.shape[0] < self.window_size[1]
                    or im_scaled.shape[1] < self.window_size[0]):
                break
            for (x, y, im_window) in sliding_window(im_scaled,
                                                    self.window_step_size,
                                                    self.window_size):
                if (im_window.shape[0] != self.window_size[1]
                        or im_window.shape[1] != self.window_size[0]):
                    continue

                # calculate the HOG features
                feature_vector = hog(im_window)
                X = np.array([feature_vector])
                prediction = self.clf.predict(X)
                if prediction == 1:
                    x1 = int(x * (self.downscale**downscale_power))
                    y1 = int(y * (self.downscale**downscale_power))
                    detections.append(
                        (x1, y1, x1 + int(self.window_size[0] *
                                          (self.downscale**downscale_power)),
                         y1 + int(self.window_size[1] *
                                  (self.downscale**downscale_power))))

            # Move to the next scale
            downscale_power += 1

        # Display the results before performing NMS
        clone_before_nms = clone.copy()
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)

        # Perform non-maximum suppression (NMS)
        detections = non_max_suppression(np.array(detections), self.threshold)

        clone_after_nms = clone
        # Display the results after performing NMS
        for (x1, y1, x2, y2) in detections:
            # Draw the detections
            cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)

        return clone_before_nms, clone_after_nms
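
This detector assumes self.clf is a classifier already trained on HOG features of fixed-size windows. A minimal sketch of how such a classifier could be fit with scikit-learn; positive_patches and negative_patches are hypothetical lists of grayscale patches already resized to the window size:

import numpy as np
from skimage.feature import hog
from sklearn.svm import LinearSVC

def train_window_classifier(positive_patches, negative_patches):
    # compute one HOG descriptor per patch and fit a linear SVM on the
    # resulting feature matrix (label 1 = object, label 0 = background)
    X = [hog(p) for p in positive_patches] + [hog(p) for p in negative_patches]
    y = [1] * len(positive_patches) + [0] * len(negative_patches)
    clf = LinearSVC()
    clf.fit(np.array(X), np.array(y))
    return clf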
Example 4
    def detect(self, image):
        clone = image.copy()
        image = rgb2gray(image)
        detections = []  # detected bounding boxes
        downscale_power = 0  # current downscaling exponent
        # iterate over the downscaled image pyramid
        for im_scaled in pyramid(image,
                                 downscale=self.downscale,
                                 min_size=self.window_size):
            if (im_scaled.shape[0] < self.window_size[1]
                    or im_scaled.shape[1] < self.window_size[0]):
                # stop once the scaled image is smaller than the window
                break
            for (x, y, im_window) in sliding_window(im_scaled,
                                                    self.window_step_size,
                                                    self.window_size):
                if (im_window.shape[0] != self.window_size[1]
                        or im_window.shape[1] != self.window_size[0]):
                    continue
                feature_vector = hog(im_window, block_norm="L1")  # compute HOG features
                X = np.array([feature_vector])
                prediction = self.clf.predict(X)
                if prediction == 1:
                    x1 = int(x * (self.downscale ** downscale_power))
                    y1 = int(y * (self.downscale ** downscale_power))
                    detections.append(
                        (x1, y1,
                         x1 + int(self.window_size[0] * (self.downscale ** downscale_power)),
                         y1 + int(self.window_size[1] * (self.downscale ** downscale_power))))
            downscale_power += 1  # move to the next scale
        clone_before_nms = clone.copy()  # shows the results before NMS
        for (x1, y1, x2, y2) in detections:
            cv2.rectangle(clone_before_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)  # draw the detection
        detections = non_max_suppression(np.array(detections),
                                         self.threshold)  # detections after NMS
        clone_after_nms = clone
        # draw the results after NMS
        for (x1, y1, x2, y2) in detections:
            cv2.rectangle(clone_after_nms, (x1, y1), (x2, y2), (0, 255, 0),
                          thickness=2)
        return clone_before_nms, clone_after_nms
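
Examples 2 and 4 call a non_max_suppression helper that is not included in the listings. A minimal sketch of the usual greedy suppression such a helper performs, assuming boxes are rows of (x1, y1, x2, y2) and the threshold is a maximum allowed overlap ratio (the real helper may take slightly different arguments):

import numpy as np

def non_max_suppression(boxes, overlap_thresh=0.3):
    # greedily keep boxes, dropping any box whose overlap with an already
    # kept box exceeds overlap_thresh
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype(float)
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        i = order[-1]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[order[:-1]]
        order = order[:-1][overlap <= overlap_thresh]
    return boxes[keep].astype(int)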
Example 5
def detect(origin_img, hog, clf):
    windows = []
    for img, scale in pyramid(origin_img):
        points = []
        features = []
        for (x1, y1, window) in sliding_window(img, 8, (128, 128)):
            if window.shape[0] == 128 and window.shape[1] == 128:
                features.append(hog.compute(window).reshape(-1))
                points.append([x1, y1])

        if len(features) == 0:
            continue

        Y = clf.predict(features)
        points = np.asarray(points)[Y==1] * scale
        w = np.concatenate((points, points + 128*scale), axis=1).astype(int)
        if w.shape[0] > 0:
            windows.append(w.tolist())
    return windows
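
A call to this function might look like the following; the file names are hypothetical and the classifier is assumed to have been trained on descriptors from the same cv2.HOGDescriptor configuration:

import cv2
import joblib

hog = cv2.HOGDescriptor((128, 128), (16, 16), (8, 8), (8, 8), 9)
clf = joblib.load("svm_model.pkl")   # hypothetical pre-trained classifier
img = cv2.imread("test.jpg")         # hypothetical input image
windows = detect(img, hog, clf)      # per scale, a list of [x1, y1, x2, y2] boxes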
Example 6
    def detect_roofs(self, image):
        # loop over the image pyramid
        for resized in utils.pyramid(image, scale=1.5):
            # loop over the sliding window for each layer of the pyramid
            # winW and winH are assumed to be window dimensions defined elsewhere in the module
            for (x, y, window) in utils.sliding_window(resized, stepSize=32, windowSize=(winW, winH)):
                # if the window does not meet our desired window size, ignore it
                if window.shape[0] != winH or window.shape[1] != winW:
                    continue

                # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW, SUCH AS APPLYING A
                # MACHINE LEARNING CLASSIFIER TO CLASSIFY THE CONTENTS OF THE
                # WINDOW

                # since we do not have a classifier, we'll just draw the window
                # (a sketch of the classification step follows this example)
                clone = resized.copy()
                cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
                cv2.imshow("Window", clone)
                cv2.waitKey(1)
                time.sleep(0.025)
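
The capitalized comment above marks the spot where a real detector would classify each window instead of just drawing it. A hedged sketch of such a step, assuming a HOG + linear SVM model as in the earlier examples (classify_window and clf are hypothetical names):

import cv2
import numpy as np
from skimage.feature import hog

def classify_window(window, clf):
    # compute a HOG descriptor for one BGR window and ask a pre-trained
    # binary classifier (e.g. LinearSVC) whether it contains a roof
    gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)
    features = hog(gray)
    return clf.predict(np.array([features]))[0] == 1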
Example 7
y_train = [y_train[:, i] for i in range(4)]
y_test = [y_test[:, i] for i in range(4)]

_run = 2
n_layer = 10
input_dim = x.shape[1]
output_dim = [10, 48, 10, 48]
output_name = ["TTtP", "ATaP", "TTtH", "ATaH"]
output_loss = [0.5, 1.0, 0.5, 1.0]

results = []
if os.path.isfile(f"results/multitask_{_run}.json"):
    results = json_read(f"results/multitask_{_run}.json")

for l in range(1, n_layer):
    h_units = pyramid(input_dim, sum(output_dim), l)

    i = Input(shape=[input_dim])
    h = i
    for units in h_units:
        h = Dense(units, activation="relu")(h)
        h = Dropout(0.5)(h)
    o = [
        Dense(dim, activation="softmax", name=name)(h)
        for dim, name in zip(output_dim, output_name)
    ]

    m = Model(inputs=i, outputs=o)
    m.compile(
        "adam",
        "sparse_categorical_crossentropy",
        loss_weights=output_loss,  # assumed: output_loss is defined above but otherwise unused
        metrics=["acc"])           # assumed to match the single-task example below
Example 8
    def detect(self, image):
        clone = image.copy()

        image1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image1_hist = cv2.calcHist([image1], [0], None, [256], [0, 256])

        # list to store the detections
        detections = []
        # current scale of the image
        downscale_power = 0
        no_of_windows = 0
        no_of_windows_cc = 0
        w = self.window_size[1] * self.window_size[0]
        # downscale the image and iterate
        for im_scaled in pyramid(image, downscale=self.downscale, min_size=self.window_size):
            # if the width or height of the scaled image is less than
            # the width or height of the window, then end the iterations
            if im_scaled.shape[0] < self.window_size[1] or im_scaled.shape[1] < self.window_size[0]:
                break
            for (x, y, im_window) in sliding_window(im_scaled, self.window_step_size, self.window_size):
                if im_window.shape[0] != self.window_size[1] or im_window.shape[1] != self.window_size[0]:
                    continue
                no_of_windows = no_of_windows + 1

                # self-similarity score: compare the grayscale histogram of the
                # window against the histogram of the whole image, pixel by pixel
                im_window1 = cv2.cvtColor(im_window, cv2.COLOR_BGR2GRAY)
                im_window1_hist = cv2.calcHist([im_window1], [0], None, [256], [0, 256])
                ss_sum = 0
                rows, cols = im_window1.shape
                for i in range(rows):
                    for j in range(cols):
                        pixel = im_window1[i][j]
                        ss_sum = ss_sum + min(abs(image1_hist[pixel] - im_window1_hist[pixel]),
                                              abs(im_window1_hist[pixel]))
                ss_sum = ss_sum / w

                # color-contrast score: compare the LAB histogram of the window
                # with that of a larger context window around it
                im_window_cc = image[y:y + 50 + self.window_size[1], x:x + 50 + self.window_size[0]]
                im_window_lab = cv2.cvtColor(im_window, cv2.COLOR_BGR2LAB)
                im_window_cc_lab = cv2.cvtColor(im_window_cc, cv2.COLOR_BGR2LAB)
                channels = cv2.split(im_window_lab)  # split the LAB channels
                colors = ("l", "a", "b")
                for (i, col) in zip(channels, colors):
                    # note: only the histogram of the last channel survives the loop
                    im_window_hist = cv2.calcHist([i], [0], None, [256], [0, 256])

                channels = cv2.split(im_window_cc_lab)
                for (i, col) in zip(channels, colors):
                    im_window_cc_hist = cv2.calcHist([i], [0], None, [256], [0, 256])

                coeff = cv2.compareHist(im_window_hist, im_window_cc_hist, cv2.HISTCMP_CHISQR)
                # the smaller the coefficient, the more similar the histograms; we want a
                # color difference between the window and its context, hence large values
                if coeff > 1000 or ss_sum > 200:
                    feature_vector = hog(im_window1)
                    X = np.array([feature_vector])
                    prediction = self.clf.predict(X)
                    if prediction == 1:
                        x1 = int(x * (self.downscale ** downscale_power))
                        y1 = int(y * (self.downscale ** downscale_power))
                        detections.append(
                            (x1, y1,
                             x1 + int(self.window_size[0] * (self.downscale ** downscale_power)),
                             y1 + int(self.window_size[1] * (self.downscale ** downscale_power))))
                        no_of_windows_cc = no_of_windows_cc + 1
            downscale_power += 1
        print("Number of windows without CC")
        print(no_of_windows)

        print("Number of windows with CC + SS")
        print(no_of_windows_cc)
        clone_detected = clone.copy()
        for (x1, y1, x2, y2) in detections:
            cv2.rectangle(clone_detected, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)

        return clone_detected
Example 9
y_train = [y_train[:, i] for i in range(4)]
y_test = [y_test[:, i] for i in range(4)]

input_dim = x.shape[1]
output_dim = [10, 48, 10, 48]
output_name = ["TTtP", "ATaP", "TTtH", "ATaH"]
n_layer = 10


for target in range(4):
    results = []
    if os.path.isfile(f"results/singletask_{target}.json"):
        results = json_read(f"results/singletask_{target}.json")

    for l in range(1, n_layer):
        h_units = pyramid(input_dim, output_dim[target], l)

        i = Input(shape=[input_dim])
        h = i
        for units in h_units:
            h = Dense(units, activation="relu")(h)
            h = Dropout(0.5)(h)
        o = Dense(output_dim[target], activation="softmax", name=output_name[target])(h)

        m = Model(inputs=i, outputs=o)
        m.compile("adam", "sparse_categorical_crossentropy", metrics=["acc"])

        init = m.get_weights()
        for _ in range(50):
            m.set_weights(init)
            m.fit(