# Iterate over every image-sequence folder and build a test-time dataset and
# loader for each one. Sorting first makes the processing order deterministic.
sub_folders.sort()
for folder in sub_folders:
    # Get color image filenames for this sequence; skip sequences that
    # contain no relevant files.
    test_filenames = utils.get_file_names_in_sequence(
        sequence_root=folder)
    if len(test_filenames) == 0:
        print("Sequence {} does not have relevant files".format(
            str(folder)))
        continue
    # Build the SfM dataset for this sequence in test mode.
    # NOTE(review): exact semantics of inlier_percentage / visible_interval /
    # network_downsampling are defined by dataset.SfMDataset — not visible
    # in this file; confirm against the dataset module.
    test_dataset = dataset.SfMDataset(
        image_file_names=test_filenames,
        folder_list=folder_list,
        adjacent_range=adjacent_range,
        image_downsampling=image_downsampling,
        inlier_percentage=inlier_percentage,
        network_downsampling=network_downsampling,
        load_intermediate_data=load_intermediate_data,
        intermediate_data_root=precompute_root,
        phase="test",
        pre_workers=num_pre_workers,
        visible_interval=visibility_overlap)
    # batch_size=1, shuffle=False: evaluate frames one at a time, in order.
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=num_workers)
    # Progress bar sized to the loader.
    # NOTE(review): dynamic_ncols=True makes tqdm ignore ncols, so the
    # ncols=40 argument here is redundant — confirm which was intended.
    tq = tqdm.tqdm(total=len(test_loader), dynamic_ncols=True, ncols=40)
    tq.set_description('Test')
print("Tensorboard visualization at {}".format(str(log_root))) # Get color image filenames train_filenames, val_filenames, test_filenames = utils.get_color_file_names_by_bag(training_data_root, validation_patient_id=validation_patient_id, testing_patient_id=testing_patient_id, id_range=id_range) folder_list = utils.get_parent_folder_names(training_data_root, id_range=id_range) # Build training and validation dataset train_dataset = dataset.SfMDataset(image_file_names=train_filenames, folder_list=folder_list, adjacent_range=adjacent_range, transform=training_transforms, downsampling=input_downsampling, network_downsampling=network_downsampling, inlier_percentage=inlier_percentage, use_store_data=load_intermediate_data, store_data_root=training_data_root, phase="train", is_hsv=is_hsv, num_pre_workers=num_workers, visible_interval=30, rgb_mode="rgb") validation_dataset = dataset.SfMDataset(image_file_names=val_filenames, folder_list=folder_list, adjacent_range=adjacent_range, transform=None, downsampling=input_downsampling, network_downsampling=network_downsampling, inlier_percentage=inlier_percentage, use_store_data=True, store_data_root=training_data_root, phase="validation", is_hsv=is_hsv, num_pre_workers=num_workers, visible_interval=30, rgb_mode="rgb")
# Split color image filenames into train/val/test sets by patient id.
train_filenames, val_filenames, test_filenames = \
    utils.get_color_file_names_by_bag(root=training_data_root,
                                      training_patient_id=training_patient_id,
                                      validation_patient_id=validation_patient_id,
                                      testing_patient_id=testing_patient_id)
sequence_path_list = utils.get_parent_folder_names(training_data_root, id_range=id_range)
# Build training and validation dataset.
# NOTE(review): sampling_size / heatmap_sigma / num_iter semantics come from
# dataset.SfMDataset and are not visible here — confirm there.
train_dataset = dataset.SfMDataset(image_file_names=train_filenames,
                                   folder_list=sequence_path_list,
                                   adjacent_range=adjacent_range,
                                   image_downsampling=image_downsampling,
                                   inlier_percentage=inlier_percentage,
                                   network_downsampling=network_downsampling,
                                   load_intermediate_data=load_intermediate_data,
                                   intermediate_data_root=precompute_root,
                                   sampling_size=sampling_size,
                                   phase="train",
                                   heatmap_sigma=heatmap_sigma,
                                   pre_workers=num_pre_workers,
                                   visible_interval=visibility_overlap,
                                   num_iter=num_iter)
# Shuffled mini-batches for training.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=num_workers)
# Validation dataset — NOTE(review): this constructor call is truncated here;
# its remaining keyword arguments continue past the end of this chunk.
val_dataset = dataset.SfMDataset(image_file_names=val_filenames,
                                 folder_list=sequence_path_list,
                                 adjacent_range=adjacent_range,
                                 image_downsampling=image_downsampling,
                                 inlier_percentage=inlier_percentage,
                                 network_downsampling=network_downsampling,
# Get color image filenames test_filenames = utils.get_filenames_from_frame_indexes( sequence_root, selected_frame_index_list) folder_list = utils.get_parent_folder_names(evaluation_data_root, id_range=id_range) if phase == "validation": test_dataset = dataset.SfMDataset( image_file_names=test_filenames, folder_list=folder_list, adjacent_range=adjacent_range, transform=None, downsampling=input_downsampling, network_downsampling=network_downsampling, inlier_percentage=inlier_percentage, use_store_data=load_intermediate_data, store_data_root=evaluation_data_root, phase="validation", is_hsv=is_hsv, num_pre_workers=num_pre_workers, visible_interval=visibility_overlap, rgb_mode="rgb") test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=0) depth_estimation_model = models.FCDenseNet57(n_classes=1) # Initialize the depth estimation network with Kaiming He initialization utils.init_net(depth_estimation_model,
# Ensure the results directory exists (creates missing parents; idempotent).
# Replaces the old `try: mkdir ... except OSError: pass` pattern:
# exist_ok=True tolerates an already-existing directory but still surfaces
# real failures (e.g. permission errors) instead of silently swallowing every
# OSError. Note: the effective mode is still subject to the process umask.
results_root.mkdir(mode=0o777, parents=True, exist_ok=True)
# Get color image filenames, split 50/50 into train and validation for the
# selected bag.
train_filenames, val_filenames, test_filenames = utils.get_color_file_names_by_bag(training_data_root,
                                                                                   which_bag=which_bag,
                                                                                   split_ratio=(0.5, 0.5))
training_folder_list, val_folder_list = utils.get_parent_folder_names(training_data_root, which_bag=which_bag)
# Build training dataset over both the training and validation folders, with
# augmentation enabled (to_augment=True, training_transforms).
# NOTE(review): parameter semantics (inlier_percentage, net_depth,
# use_view_indexes_per_point) come from dataset.SfMDataset — confirm there.
train_dataset = dataset.SfMDataset(image_file_names=train_filenames,
                                   folder_list=training_folder_list + val_folder_list,
                                   adjacent_range=adjacent_range,
                                   to_augment=True,
                                   transform=training_transforms,
                                   downsampling=downsampling,
                                   net_depth=teacher_depth,
                                   inlier_percentage=inlier_percentage,
                                   use_store_data=load_intermediate_data,
                                   store_data_root=precompute_root,
                                   use_view_indexes_per_point=use_view_indexes_per_point,
                                   visualize=visualize,
                                   phase="train",
                                   is_hsv=is_hsv)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=shuffle,
                                           num_workers=num_workers)
# Load trained teacher model (single-channel depth output U-Net).
depth_estimation_model_teacher = models.UNet(in_channels=3,
                                             out_channels=1,
                                             depth=teacher_depth,
                                             wf=filter_base,
                                             padding=True,
                                             up_mode='upsample')
# Initialize the depth estimation network with Kaiming He initialization.
utils.init_net(depth_estimation_model_teacher,
               type="kaiming",
               mode="fan_in",
               activation_mode="relu",
               distribution="normal")
# Multi-GPU running via DataParallel.
depth_estimation_model_teacher = torch.nn.DataParallel(depth_estimation_model_teacher)
# NOTE(review): the teacher is put in train() mode even though it acts as a
# (presumably frozen) supervisor — confirm whether eval() (fixed batch-norm /
# dropout statistics) was intended.
depth_estimation_model_teacher.train()