class Runner:
    def __init__(self):
        self.loader = UADetracLoader()
        self.preprocess = PreprocessingModule()
        self.network = UNet()
        self.cluster = ClusterModule()
        self.index = IndexingModule()

    def run(self):
        """
        Steps:
          1.  Load the data
          2.  Preprocess the data
          3.  Train the network
          4a. Cluster the data
          4b. Postprocess the data
          5a. Generate compressed form
          5b. Generate indexes and perform CBIR
        :return: ???
        """
        import time
        st = time.time()

        # 1. Load the images and labels
        images = self.loader.load_cached_images()
        labels = self.loader.load_cached_labels()
        vehicle_labels = labels['vehicle']
        video_start_indices = self.loader.get_video_start_indices()
        print("Done loading images in", time.time() - st, "(sec)")

        # 2. Preprocess: background subtraction over each video segment
        st = time.time()
        segmented_images = self.preprocess.run(images, video_start_indices)
        print("Done with background subtraction in", time.time() - st, "(sec)")
        self.preprocess.saveSegmentedImages()

        # 3. Train the network and generate compressed / segmented outputs
        st = time.time()
        self.network.train(images, segmented_images)
        final_compressed_images, final_segmented_images = self.network.execute()
        print("Done training the main network in", time.time() - st, "(sec)")

        # 4. Cluster the compressed representations
        st = time.time()
        cluster_labels = self.cluster.run(final_compressed_images)
        print("Done clustering in", time.time() - st, "(sec)")

        # 5. Build the index used for content-based image retrieval (CBIR)
        st = time.time()
        self.index.train(images, final_segmented_images, vehicle_labels)
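## Hedged usage sketch (not from the original file): assuming the module-level
## imports for UADetracLoader, PreprocessingModule, UNet, ClusterModule, and
## IndexingModule are in place, the whole pipeline is driven by instantiating
## Runner and calling run(); per-stage timings are printed as the run proceeds.
def _example_run_pipeline():
    runner = Runner()
    runner.run()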
import numpy as np


def load_jnet_results(images, model_directory, segmented_images_directory=None):
    """
    Load the segmented images instead of the original ones.
    If no cached segmentation directory is given, restore the trained network
    from `model_directory`, run it on `images`, and overlap the outputs with
    the originals.
    :return: segmented (overlapped) images
    """
    if segmented_images_directory is None:
        ## load the model
        network = UNet()
        ## execute the model on the images of interest
        _, level4_outputs = network.execute(images, load_dir=model_directory)
        ## overlap the network outputs with the original images
        level4_overlapped_outputs = util_custom.overlap(images, level4_outputs)
        return level4_overlapped_outputs
    else:
        ## segmented images already exist on disk; load them directly
        return np.load(segmented_images_directory)
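## Hedged usage sketch for load_jnet_results (the paths below are hypothetical,
## used only for illustration): with a cached .npy of segmented frames we load it
## directly; otherwise the saved UNet is restored from the model directory and run
## on the frames loaded by UADetracLoader.
def _example_load_jnet_results():
    loader = UADetracLoader()
    images = loader.load_cached_images()
    model_dir = '/path/to/saved/unet_model'  # hypothetical model location
    seg_path = None  # or '/path/to/segmented_images.npy' to skip re-running the network
    segmented = load_jnet_results(images, model_dir, segmented_images_directory=seg_path)
    return segmented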
if __name__ == "__main__":
    # 0. Initialize the modules
    loader = UADetracLoader()
    preprocess = PreprocessingModule()
    network = UNet()

    import time
    st = time.time()

    # 1. Load the images (cached images is fine)
    images = loader.load_cached_images()
    labels = loader.load_cached_labels()
    video_start_indices = loader.get_video_start_indices()
def train_conservative(train_images, preprocess, name):
    ## reuse the cached segmentation results instead of recomputing them
    segmented_images = preprocess.run(train_images, None, load=True)
    ## train a fresh UNet on the image / segmentation pairs and save it under `name`
    model = UNet()
    model.train(train_images, segmented_images, save_name=name)
    return
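## Hedged usage sketch for train_conservative (the save name is hypothetical):
## load cached UA-DETRAC frames, let the preprocessing module return its cached
## segmentation via load=True, and train a UNet saved under the given name.
def _example_train_conservative():
    loader = UADetracLoader()
    preprocess = PreprocessingModule()
    train_images = loader.load_cached_images()
    train_conservative(train_images, preprocess, name='unet_conservative_example')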
if __name__ == "__main__": # %% import os import time from loaders.uadetrac_loader import UADetracLoader from eva_storage.preprocessingModule import PreprocessingModule from eva_storage.UNet import UNet from eva_storage.postprocessingModule import PostprocessingModule from logger import Logger loader = UADetracLoader() preprocess = PreprocessingModule() bare_model = UNet() postprocess = PostprocessingModule() logger = Logger() st = time.time() # 1. Load the images (cached images is fine) images = loader.load_cached_images() labels = loader.load_cached_labels() video_start_indices = loader.get_video_start_indices() print(f"Done loading images in {time.time() - st} (sec)") directory_begin = '/nethome/jbang36/eva_jaeho/data/models/' """ model_names = ['history20_dist2thresh300', 'history20_dist2thresh300_bloat_lvl2', 'history20_dist2thresh300_bloat_lvl3',