def process_files(positive_dir, negative_dir, color_space="bgr",
                  channels=None, hog=False, histogram=False, spatial=False,
                  hog_size=(64, 64), hog_bins=9, cell_size=(8, 8),
                  cells_per_block=(2, 2), histogram_bins=16,
                  spatial_size=(16, 16)):
    """Extract, scale, and split feature vectors from labeled image directories.

    Reads every file in ``positive_dir`` and ``negative_dir``, converts each
    image to the requested color space, extracts a feature vector with
    ``Descriptor``, scales all vectors with a ``StandardScaler`` fit on the
    full data set, then shuffles and splits each class into training /
    cross-validation / test sets with a 75/20/5 ratio.

    Args:
        positive_dir: Directory of positive (vehicle) training images.
        negative_dir: Directory of negative (non-vehicle) training images.
        color_space: One of "bgr", "hls", "hsv", "luv", "ycrcb"/"ycc", "yuv".
            Unrecognized values leave images in BGR (color_const == -1).
        channels: Image channels to use; defaults to [0, 1, 2]. Stored in the
            returned dict for downstream consumers.
        hog, histogram, spatial: Which feature families the Descriptor extracts.
        hog_size, hog_bins, cell_size, cells_per_block: HOG parameters.
        histogram_bins: Number of color-histogram bins.
        spatial_size: Size for spatially-binned features.

    Returns:
        dict with the split feature sets, the fitted scaler, and every
        extraction parameter (keys: positive_train, negative_train,
        positive_val, negative_val, positive_test, negative_test, scaler,
        hog, histogram, spatial, color_space, color_const, channels,
        hog_size, hog_bins, cell_size, cells_per_block, histogram_bins,
        spatial_size, num_features).

    Raises:
        FileNotFoundError: If either directory does not exist.
        ValueError: If either directory yields no readable images.
    """
    # A list default would be shared across calls; use a None sentinel instead.
    if channels is None:
        channels = [0, 1, 2]

    # Resolve and validate the training directories.
    positive_dir = os.path.abspath(positive_dir)
    negative_dir = os.path.abspath(negative_dir)

    if not os.path.isdir(positive_dir):
        raise FileNotFoundError("Directory " + positive_dir + " not found.")
    if not os.path.isdir(negative_dir):
        raise FileNotFoundError("Directory " + negative_dir + " not found.")

    positive_files = [
        os.path.join(positive_dir, file)
        for file in os.listdir(positive_dir)
        if os.path.isfile(os.path.join(positive_dir, file))
    ]
    negative_files = [
        os.path.join(negative_dir, file)
        for file in os.listdir(negative_dir)
        if os.path.isfile(os.path.join(negative_dir, file))
    ]

    print("{} positive files and {} negative files found.\n".format(
        len(positive_files), len(negative_files)))

    # Map the requested color space to a cv2 conversion constant;
    # -1 means "leave the image in BGR".
    color_space = color_space.lower()
    if color_space == "hls":
        color_const = cv2.COLOR_BGR2HLS
    elif color_space == "hsv":
        color_const = cv2.COLOR_BGR2HSV
    elif color_space == "luv":
        color_const = cv2.COLOR_BGR2Luv
    elif color_space == "ycrcb" or color_space == "ycc":
        color_const = cv2.COLOR_BGR2YCrCb
    elif color_space == "yuv":
        color_const = cv2.COLOR_BGR2YUV
    else:
        color_const = -1

    # Feature vectors for each class.
    positive_features = []
    negative_features = []

    time_begin = time.time()

    # One descriptor object is reused for every image.
    descriptor = Descriptor(hog=hog, histogram=histogram, spatial=spatial,
                            hog_size=hog_size, hog_bins=hog_bins,
                            cell_size=cell_size,
                            cells_per_block=cells_per_block,
                            histogram_bins=histogram_bins,
                            spatial_size=spatial_size)

    # Extract features from each file; unreadable files are skipped silently.
    for i, file_path in enumerate(positive_files + negative_files):
        image = cv2.imread(file_path)
        if image is None:
            continue
        if color_const > -1:
            image = cv2.cvtColor(image, color_const)
        feature_vector = descriptor.get_features(image)
        if i < len(positive_files):
            positive_features.append(feature_vector)
        else:
            negative_features.append(feature_vector)

    print("Features extraction completed in {:.1f} seconds\n".format(
        time.time() - time_begin))

    # Fail loudly (instead of an opaque IndexError) if a class is empty.
    if not positive_features or not negative_features:
        raise ValueError("No readable images found in one or both directories.")

    num_features = len(positive_features[0])

    # Scale features with a scaler fit on the combined data set.
    scaler = StandardScaler().fit(positive_features + negative_features)
    positive_features = scaler.transform(positive_features)
    negative_features = scaler.transform(negative_features)

    # Randomize the feature vectors before splitting into training,
    # cross-validation, and test sets (75/20/5 ratio).
    # NOTE: random.shuffle on a 2-D numpy array corrupts rows (the tuple-swap
    # it performs assigns through *views*), so shuffle an index list and
    # reindex instead.
    def _shuffled(features):
        # Return `features` with its rows in a random order.
        order = list(range(len(features)))
        random.shuffle(order)
        return features[order]

    positive_features = _shuffled(positive_features)
    negative_features = _shuffled(negative_features)

    num_positive_train = int(round(0.75 * len(positive_features)))
    num_negative_train = int(round(0.75 * len(negative_features)))
    num_positive_val = int(round(0.2 * len(positive_features)))
    num_negative_val = int(round(0.2 * len(negative_features)))

    positive_train = positive_features[0:num_positive_train]
    negative_train = negative_features[0:num_negative_train]
    positive_val = positive_features[num_positive_train:(num_positive_train
                                                         + num_positive_val)]
    negative_val = negative_features[num_negative_train:(num_negative_train
                                                         + num_negative_val)]
    positive_test = positive_features[(num_positive_train
                                       + num_positive_val):]
    negative_test = negative_features[(num_negative_train
                                       + num_negative_val):]

    print(
        "Randomized images into training, cross-validation, and test sets.\n")
    print("{} images in positive training set.".format(len(positive_train)))
    print("{} images in positive cross-validation set.".format(
        len(positive_val)))
    print("{} images in positive test set.".format(len(positive_test)))
    print("{} total positive images.\n".format(
        len(positive_train) + len(positive_val) + len(positive_test)))
    print("{} images in negative training set.".format(len(negative_train)))
    print("{} images in negative cross-validation set.".format(
        len(negative_val)))
    print("{} images in negative test set.".format(len(negative_test)))
    print("{} total negative images.\n".format(
        len(negative_train) + len(negative_val) + len(negative_test)))

    # Bundle data and parameters so the trainer/detector can reuse them.
    feature_data = {
        "positive_train": positive_train,
        "negative_train": negative_train,
        "positive_val": positive_val,
        "negative_val": negative_val,
        "positive_test": positive_test,
        "negative_test": negative_test,
        "scaler": scaler,
        "hog": hog,
        "histogram": histogram,
        "spatial": spatial,
        "color_space": color_space,
        "color_const": color_const,
        "channels": channels,
        "hog_size": hog_size,
        "hog_bins": hog_bins,
        "cell_size": cell_size,
        "cells_per_block": cells_per_block,
        "histogram_bins": histogram_bins,
        "spatial_size": spatial_size,
        "num_features": num_features
    }

    return feature_data
class VehicleDetector:
    """Detects vehicles in an image via a sliding-window classifier.

    Configure the window geometry in the constructor, load a trained model
    with :meth:`load_model`, then call :meth:`classify` on an image to get
    the windows predicted to contain a vehicle.
    """

    def __init__(self, window_size, x_overlap, y_step, x_range, y_range,
                 scale):
        # Sliding-window geometry; passed straight through to sliding_window().
        self.window_size = window_size
        self.x_overlap = x_overlap
        self.y_step = y_step
        self.x_range = x_range
        self.y_range = y_range
        self.scale = scale
        # Populated by classify(); holds the most recent window list.
        self.windows = None

    def load_model(self, file_path):
        """Load a pickled model/scaler bundle and build its Descriptor.

        Args:
            file_path: Path to a pickle produced by the training pipeline;
                expected keys include "model", "scaler", "color_const",
                "channels", and the Descriptor parameters.

        Returns:
            self, so calls can be chained.

        Raises:
            FileNotFoundError: If ``file_path`` does not exist.
        """
        file_path = os.path.abspath(file_path)

        if not os.path.isfile(file_path):
            raise FileNotFoundError("File " + file_path + " not found.")

        # SECURITY: pickle.load executes arbitrary code from the file --
        # only load model files from trusted sources.
        # Use a context manager so the file handle is always closed
        # (the original leaked it).
        with open(file_path, "rb") as model_file:
            model_data = pickle.load(model_file)

        self.model = model_data["model"]
        self.scaler = model_data["scaler"]
        self.color_const = model_data["color_const"]
        self.channels = model_data["channels"]
        self.descriptor = Descriptor(
            hog=model_data["hog"],
            histogram=model_data["histogram"],
            spatial=model_data["spatial"],
            hog_size=model_data["hog_size"],
            hog_bins=model_data["hog_bins"],
            cell_size=model_data["cell_size"],
            cells_per_block=model_data["cells_per_block"],
            histogram_bins=model_data["histogram_bins"],
            spatial_size=model_data["spatial_size"])

        return self

    def classify(self, image):
        """Run the classifier over every sliding window of ``image``.

        Args:
            image: BGR image (numpy array, HxWxC) to scan.

        Returns:
            List of (x_upper, y_upper, x_lower, y_lower) windows that the
            model predicted as positive (label 1).
        """
        # Generate windows for this image's dimensions (width, height).
        self.windows = sliding_window((image.shape[1], image.shape[0]),
                                      window_size=self.window_size,
                                      x_overlap=self.x_overlap,
                                      y_step=self.y_step,
                                      x_range=self.x_range,
                                      y_range=self.y_range, scale=self.scale)

        # color_const == -1 means the model was trained on raw BGR.
        if self.color_const > -1:
            image = cv2.cvtColor(image, self.color_const)

        # One feature row per window, scaled with the training-time scaler.
        feature_matrix = [
            self.descriptor.get_features(image[y_upper:y_lower,
                                               x_upper:x_lower, :])
            for (x_upper, y_upper, x_lower, y_lower) in self.windows
        ]
        feature_matrix = self.scaler.transform(feature_matrix)
        predictions = self.model.predict(feature_matrix)

        # Keep only the windows the model labeled positive.
        return [window for window, prediction
                in zip(self.windows, predictions) if prediction == 1]