def process(self, args, outputs):
    tree = outputs['tree']
    reconstruction = outputs['reconstruction']
    photos = reconstruction.photos

    outputs['large'] = len(photos) > args.split

    if outputs['large']:
        # If we have a cluster address, we'll use a distributed workflow
        local_workflow = not bool(args.sm_cluster)

        octx = OSFMContext(tree.opensfm)
        split_done_file = octx.path("split_done.txt")

        if not io.file_exists(split_done_file) or self.rerun():
            orig_max_concurrency = args.max_concurrency
            if not local_workflow:
                args.max_concurrency = max(1, args.max_concurrency - 1)
                log.ODM_INFO("Setting max-concurrency to %s to better handle remote splits" % args.max_concurrency)

            log.ODM_INFO("Large dataset detected (%s photos) and split set at %s. Preparing split merge." % (len(photos), args.split))
            config = [
                "submodels_relpath: ../submodels/opensfm",
                "submodel_relpath_template: ../submodels/submodel_%04d/opensfm",
                "submodel_images_relpath_template: ../submodels/submodel_%04d/images",
                "submodel_size: %s" % args.split,
                "submodel_overlap: %s" % args.split_overlap,
            ]

            octx.setup(args, tree.dataset_raw, reconstruction=reconstruction, append_config=config, rerun=self.rerun())
            octx.extract_metadata(self.rerun())

            self.update_progress(5)

            if local_workflow:
                octx.feature_matching(self.rerun())

            self.update_progress(20)

            # Create submodels
            if not io.dir_exists(tree.submodels_path) or self.rerun():
                if io.dir_exists(tree.submodels_path):
                    log.ODM_WARNING("Removing existing submodels directory: %s" % tree.submodels_path)
                    shutil.rmtree(tree.submodels_path)

                octx.run("create_submodels")
            else:
                log.ODM_WARNING("Submodels directory already exists at: %s" % tree.submodels_path)

            # Find paths of all submodels
            mds = metadataset.MetaDataSet(tree.opensfm)
            submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]

            for sp in submodel_paths:
                sp_octx = OSFMContext(sp)

                # Copy filtered GCP file if needed
                # One in OpenSfM's directory, one in the submodel project directory
                if reconstruction.gcp and reconstruction.gcp.exists():
                    submodel_gcp_file = os.path.abspath(sp_octx.path("..", "gcp_list.txt"))
                    submodel_images_dir = os.path.abspath(sp_octx.path("..", "images"))

                    if reconstruction.gcp.make_filtered_copy(submodel_gcp_file, submodel_images_dir):
                        log.ODM_INFO("Copied filtered GCP file to %s" % submodel_gcp_file)
                        io.copy(submodel_gcp_file, os.path.abspath(sp_octx.path("gcp_list.txt")))
                    else:
                        log.ODM_INFO("No GCP will be copied for %s, not enough images in the submodel are referenced by the GCP" % sp_octx.name())

            # Reconstruct each submodel
            log.ODM_INFO("Dataset has been split into %s submodels. Reconstructing each submodel..." % len(submodel_paths))
            self.update_progress(25)

            if local_workflow:
                for sp in submodel_paths:
                    log.ODM_INFO("Reconstructing %s" % sp)
                    OSFMContext(sp).reconstruct(self.rerun())
            else:
                lre = LocalRemoteExecutor(args.sm_cluster, self.rerun())
                lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
                lre.run_reconstruction()

            self.update_progress(50)

            # TODO: this is currently not working and needs a champion to fix it
            # https://community.opendronemap.org/t/filenotfound-error-cameras-json/6047/2
            # resplit_done_file = octx.path('resplit_done.txt')
            # if not io.file_exists(resplit_done_file) and bool(args.split_multitracks):
            #     submodels = mds.get_submodel_paths()
            #     i = 0
            #     for s in submodels:
            #         template = octx.path("../aligned_submodels/submodel_%04d")
            #         with open(s+"/reconstruction.json", "r") as f:
            #             j = json.load(f)
            #         for k in range(0, len(j)):
            #             v = j[k]
            #             path = template % i
            #             # Create the submodel path up to opensfm
            #             os.makedirs(path+"/opensfm")
            #             os.makedirs(path+"/images")
            #             # Symlinks for common data
            #             images = os.listdir(octx.path("../images"))
            #             for image in images:
            #                 os.symlink("../../../images/"+image, path+"/images/"+image)
            #             os.symlink("../../../opensfm/exif", path+"/opensfm/exif")
            #             os.symlink("../../../opensfm/features", path+"/opensfm/features")
            #             os.symlink("../../../opensfm/matches", path+"/opensfm/matches")
            #             os.symlink("../../../opensfm/reference_lla.json", path+"/opensfm/reference_lla.json")
            #             os.symlink("../../../opensfm/camera_models.json", path+"/opensfm/camera_models.json")
            #             shutil.copy(s+"/../cameras.json", path+"/cameras.json")
            #             shutil.copy(s+"/../images.json", path+"/images.json")
            #             with open(octx.path("config.yaml")) as f:
            #                 doc = yaml.safe_load(f)
            #             dmcv = "depthmap_min_consistent_views"
            #             if dmcv in doc:
            #                 if len(v["shots"]) < doc[dmcv]:
            #                     doc[dmcv] = len(v["shots"])
            #                     print("WARNING: Reduced "+dmcv+" to accommodate short track")
            #             with open(path+"/opensfm/config.yaml", "w") as f:
            #                 yaml.dump(doc, f)
            #             # We need the original tracks file for the visualsfm export, since
            #             # there may still be point matches between the tracks
            #             shutil.copy(s+"/tracks.csv", path+"/opensfm/tracks.csv")
            #             # Create our new reconstruction file with only the relevant track
            #             with open(path+"/opensfm/reconstruction.json", "w") as o:
            #                 json.dump([v], o)
            #             # Create image lists
            #             with open(path+"/opensfm/image_list.txt", "w") as o:
            #                 o.writelines(list(map(lambda x: "../images/"+x+'\n', v["shots"].keys())))
            #             with open(path+"/img_list.txt", "w") as o:
            #                 o.writelines(list(map(lambda x: x+'\n', v["shots"].keys())))
            #             i += 1
            #     os.rename(octx.path("../submodels"), octx.path("../unaligned_submodels"))
            #     os.rename(octx.path("../aligned_submodels"), octx.path("../submodels"))
            #     octx.touch(resplit_done_file)

            mds = metadataset.MetaDataSet(tree.opensfm)
            submodel_paths = [os.path.abspath(p) for p in mds.get_submodel_paths()]

            # Align
            octx.align_reconstructions(self.rerun())

            self.update_progress(55)

            # Aligned reconstruction is in reconstruction.aligned.json
            # We need to rename it to reconstruction.json
            remove_paths = []
            for sp in submodel_paths:
                sp_octx = OSFMContext(sp)

                aligned_recon = sp_octx.path('reconstruction.aligned.json')
                unaligned_recon = sp_octx.path('reconstruction.unaligned.json')
                main_recon = sp_octx.path('reconstruction.json')

                if io.file_exists(main_recon) and io.file_exists(unaligned_recon) and not self.rerun():
                    log.ODM_INFO("Submodel %s has already been aligned." % sp_octx.name())
                    continue

                if not io.file_exists(aligned_recon):
                    log.ODM_WARNING("Submodel %s does not have an aligned reconstruction (%s). "
                                    "This could mean that the submodel could not be reconstructed "
                                    "(are there enough features to reconstruct it?). Skipping." % (sp_octx.name(), aligned_recon))
                    remove_paths.append(sp)
                    continue

                if io.file_exists(main_recon):
                    shutil.move(main_recon, unaligned_recon)

                shutil.move(aligned_recon, main_recon)
                log.ODM_INFO("%s is now %s" % (aligned_recon, main_recon))

            # Remove invalid submodels
            submodel_paths = [p for p in submodel_paths if p not in remove_paths]

            # Run ODM toolchain for each submodel
            if local_workflow:
                for sp in submodel_paths:
                    sp_octx = OSFMContext(sp)

                    log.ODM_INFO("========================")
                    log.ODM_INFO("Processing %s" % sp_octx.name())
                    log.ODM_INFO("========================")

                    argv = get_submodel_argv(args, tree.submodels_path, sp_octx.name())

                    # Re-run the ODM toolchain on the submodel
                    system.run(" ".join(map(quote, map(str, argv))), env_vars=os.environ.copy())
            else:
                lre.set_projects([os.path.abspath(os.path.join(p, "..")) for p in submodel_paths])
                lre.run_toolchain()

            # Restore max_concurrency value
            args.max_concurrency = orig_max_concurrency

            octx.touch(split_done_file)
        else:
            log.ODM_WARNING('Found a split done file in: %s' % split_done_file)
    else:
        log.ODM_INFO("Normal dataset, will process all at once.")
        self.progress = 0.0
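# Usage sketch (illustrative only): the distributed branch above reduces to the
# pattern below. The node URL and submodel paths are placeholders, and the
# import path is assumed from how LocalRemoteExecutor is used in this stage.
def _example_distributed_split_merge():
    from opendm.remote import LocalRemoteExecutor

    lre = LocalRemoteExecutor("http://localhost:3000")
    lre.set_projects([
        "/datasets/project/submodels/submodel_0000",
        "/datasets/project/submodels/submodel_0001",
    ])
    lre.run_reconstruction()  # reconstruct each submodel, splitting work between the local machine and the node
    lre.run_toolchain()       # then re-run the full ODM toolchain on each aligned submodel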
class TestRemote(unittest.TestCase):
    def setUp(self):
        self.lre = LocalRemoteExecutor('http://localhost:9001')
        projects = []
        for i in range(9):
            projects.append('/submodels/submodel_00' + str(i).rjust(2, '0'))
        self.lre.set_projects(projects)

    def test_lre_init(self):
        self.assertFalse(self.lre.node_online)

    def test_processing_logic(self):
        # Fake online status
        self.lre.node_online = True

        MAX_QUEUE = 2

        class nonloc:
            local_task_check = False
            remote_queue = 1
            should_fail = False
            task_limit_reached = False

        class OdmTaskMock:
            def __init__(self, running, queue_num):
                self.running = running
                self.queue_num = queue_num
                self.uuid = 'xxxxx-xxxxx-xxxxx-xxxxx-xxxx' + str(queue_num)

            def info(self):
                class StatusMock:
                    status = TaskStatus.RUNNING if self.running else TaskStatus.QUEUED
                    processing_time = 1
                return StatusMock()

            def remove(self):
                return True

        class TaskMock(Task):
            def process_local(self):
                # First task should be 0000 or 0001
                if not nonloc.local_task_check:
                    nonloc.local_task_check = self.project_path.endswith("0000") or self.project_path.endswith("0001")

                if nonloc.should_fail:
                    if self.project_path.endswith("0006"):
                        raise exceptions.TaskFailedError("FAIL #6")

                time.sleep(1)

            def process_remote(self, done):
                time.sleep(0.05)  # file upload

                self.remote_task = OdmTaskMock(nonloc.remote_queue <= MAX_QUEUE, nonloc.remote_queue)
                self.params['tasks'].append(self.remote_task)

                if nonloc.should_fail:
                    if self.project_path.endswith("0006"):
                        raise exceptions.TaskFailedError("FAIL #6")

                nonloc.remote_queue += 1

                # Upload successful
                done(error=None, partial=True)

                # Async processing
                def monitor():
                    try:
                        if nonloc.task_limit_reached and random.randint(0, 4) == 0:
                            nonloc.remote_queue -= 1
                            raise NodeTaskLimitReachedException("Random fail!")

                        if not nonloc.task_limit_reached and self.remote_task.queue_num > MAX_QUEUE:
                            nonloc.remote_queue -= 1
                            nonloc.task_limit_reached = True
                            raise NodeTaskLimitReachedException("Delayed task limit reached")

                        time.sleep(0.5)
                        nonloc.remote_queue -= 1
                        done()
                    except Exception as e:
                        done(e)

                t = threading.Thread(target=monitor)
                self.params['threads'].append(t)
                t.start()

        self.lre.run(TaskMock)
        self.assertTrue(nonloc.local_task_check)

        nonloc.should_fail = True
        nonloc.remote_queue = 1
        nonloc.task_limit_reached = False

        with self.assertRaises(exceptions.TaskFailedError):
            self.lre.run(TaskMock)
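# Standard unittest entry point, so the suite can be run either with
# `python -m unittest` or by executing the test module directly.
if __name__ == '__main__':
    unittest.main()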