def run(self):
  from glob import glob

  from vsi.io.krt import Krt as KrtCamera

  from voxel_globe.tools.camera import save_krt

  self.task.update_state(state='Processing', meta={'stage': 'metadata'})

  self.parse_json()

  metadata_filenames = glob(os.path.join(self.ingest_dir, '*'))

  krts = {}

  for metadata_filename in metadata_filenames:
    #Krt is imported as KrtCamera above, so use that name for MAX_SIZE too
    if os.stat(metadata_filename).st_size <= KrtCamera.MAX_SIZE:
      try:
        krt_1 = KrtCamera.load(metadata_filename)
        krts[os.path.basename(metadata_filename)] = krt_1
      except: #Hopefully non-krts throw an exception when loading
        import traceback as tb
        logger.debug('Non-KRT parsed: %s', tb.format_exc())

  matches = match_images(self.image_collection.images.all(), krts.keys(),
                         self.json_config)

  for match in matches:
    krt_1 = krts[match]
    logger.debug('%s matched to %s', match, matches[match].original_filename)
    save_krt(self.task.request.id, matches[match], krt_1.k, krt_1.r, krt_1.t,
             self.origin_xyz)

  self.save_scene()
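# --- Illustrative sketch (not part of the ingest flow above) -----------------
# A minimal, hypothetical example of the K/R/T decomposition that
# KrtCamera.load() returns and save_krt() persists: K holds the intrinsics,
# R the world-to-camera rotation, and T the translation of x_cam = R*x + T.
# All values below are made up.
def _krt_example():
  import numpy as np
  K = np.array([[1000.,    0., 320.],   # fx, skew, cx
                [   0., 1000., 240.],   #     fy,    cy
                [   0.,    0.,   1.]])
  R = np.eye(3)                         # world-to-camera rotation
  T = np.zeros((3, 1))                  # translation: x_cam = R.dot(x) + T
  center = -R.T.dot(T)                  # camera center in the local frame
  return K, R, T, center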
def main():
  import matplotlib

  args = parse_args()

  plot_scene = PlotScene()

  if args.limits:
    xyz = [float(i) for i in args.limits]
    plot_scene.set_limits(xyz[0], xyz[1],
                          xyz[2], xyz[3],
                          xyz[4], xyz[5])

  z_min = None

  if args.scene:
    scene = boxm2_scene_adaptor(args.scene, 'cpp')
    plot_scene.draw_scene_box(scene)
    z_min = scene.bbox[0][2]

  if args.limits and not z_min:
    z_min = xyz[4]

  if args.cameras:
    cameras = []
    camera_files = [x for pattern in args.cameras for x in glob(pattern)]
    camera_files = natural_sorted(camera_files)
    for camera_file in camera_files:
      krt = Krt.load(camera_file)
      cameras.append(krt)
    plot_scene.draw_cameras(cameras, z_min)

  if args.cameras and args.diff:
    cameras = []
    camera_files = [x for pattern in args.diff for x in glob(pattern)]
    camera_files = natural_sorted(camera_files)
    for camera_file in camera_files:
      krt = Krt.load(camera_file)
      cameras.append(krt)
    plot_scene.draw_cameras(cameras, z_min, 'g')

  plt.show()
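# Example invocation of this script (hypothetical filenames; assumes
# parse_args() defines --scene, --cameras, --limits and --diff flags matching
# the attributes used above):
#
#   python plot_cameras.py --scene model/uscene.xml --cameras 'cams_krt/*' \
#       --diff 'new_cameras/*' --limits -100 100 -100 100 0 50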
def run(self):
  from glob import glob

  from vsi.io.krt import Krt as KrtCamera

  from voxel_globe.tools.camera import save_krt

  self.task.update_state(state='Processing', meta={'stage': 'metadata'})

  self.create_camera_set()
  self.parse_json()

  metadata_filenames = glob(os.path.join(self.ingest_dir, '*'))

  krts = {}

  for metadata_filename in metadata_filenames:
    if os.stat(metadata_filename).st_size <= KrtCamera.MAX_SIZE:
      try:
        krt_1 = KrtCamera.load(metadata_filename)
        krts[os.path.basename(metadata_filename)] = krt_1
      except: #Hopefully non-krts throw an exception when loading
        import traceback as tb
        logger.debug('Non-KRT parsed: %s', tb.format_exc())

  matches = match_images(self.image_set.images.all(), krts.keys(),
                         self.json_config)
  matching_attributes = match_attributes(self.image_set.images.all(),
                                         self.json_config)

  for match in matches:
    krt_1 = krts[match]
    logger.debug('%s matched to %s', match, matches[match].filename_path)
    camera = save_krt(self.task.request.id, matches[match], krt_1.k, krt_1.r,
                      krt_1.t, self.origin_xyz, srid=self.srid,
                      attributes=matching_attributes.get(
                          os.path.basename(matches[match].filename_path), {}))
    self.camera_set.cameras.add(camera)

  self.save_scene()
def runVisualSfm(self, imageCollectionId, sceneId, cleanup=True, history=None):
  from voxel_globe.meta import models
  from voxel_globe.order.visualsfm.models import Order

  from os import environ as env
  from os.path import join as path_join
  import os
  import shutil

  from .tools import writeNvm, writeGcpFile, generateMatchPoints, runSparse, \
                     readNvm

  import voxel_globe.tools
  from voxel_globe.tools.wget import download as wget
  from voxel_globe.tools.camera import get_kto
  import voxel_globe.tools.enu as enu

  import numpy

  import boxm2_adaptor
  import boxm2_scene_adaptor

  from voxel_globe.tools.xml_dict import load_xml

  from django.contrib.gis.geos import Point

  from voxel_globe.tools.image import convert_image

  from distutils.spawn import find_executable

  from glob import glob

  self.update_state(state='INITIALIZE', meta={'stage': 0})

  #Make main temp dir and cd into it
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:

    #Because visualsfm is so... bad, I have to copy it locally so I can
    #configure it
    visualsfm_exe = os.path.join(
        processing_dir, os.path.basename(os.environ['VIP_VISUALSFM_EXE']))
    shutil.copy(find_executable(os.environ['VIP_VISUALSFM_EXE']),
                visualsfm_exe)
    with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
      fid.write('param_search_multiple_models 0\n')
      fid.write('param_use_siftgpu 2\n')

    matchFilename = path_join(processing_dir, 'match.nvm')
    sparce_filename = path_join(processing_dir, 'sparse.nvm')
    #This can NOT be changed in version 0.5.25
    gcpFilename = matchFilename + '.gcp'

    logger.debug('Task %s is processing in %s' % (self.request.id,
                                                  processing_dir))

    image_collection = models.ImageCollection.objects.get(
        id=imageCollectionId).history(history)
    imageList = image_collection.images.all()

    #A little bit of database logging
    oid = Order(processingDir=processing_dir,
                imageCollection=image_collection)

    ### if 1:
    ###   try: #Not fully integrated yet
    ###     sift_gpu = siftgpu.SiftGPU()
    ###   except:
    ###     pass

    localImageList = []
    for x in range(len(imageList)):
      #Download the image locally
      image = imageList[x].history(history)
      self.update_state(state='INITIALIZE', meta={'stage': 'image fetch',
                                                  'i': x,
                                                  'total': len(imageList)})
      imageName = image.originalImageUrl
      extension = os.path.splitext(imageName)[1].lower()
      localName = path_join(processing_dir,
                            'frame_%05d%s' % (x + 1, extension))
      wget(imageName, localName, secret=True)

      #Convert the image if necessary
      if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
        self.update_state(state='INITIALIZE',
                          meta={'stage': 'image convert',
                                'i': x,
                                'total': len(imageList)})
        #Add code here to convert to jpg for visual sfm
        if extension in ['.png']:
          from PIL import Image
          image_temp = Image.open(localName)
          if len(image_temp.mode) > 1: #Stupid visual sfm is picky :(
            new_local_name = os.path.splitext(localName)[0] + '.ppm'
          else:
            new_local_name = os.path.splitext(localName)[0] + '.pgm'
          new_local_name = os.path.splitext(localName)[0] + '.jpg'
          ###ingest.convert_image(localName, new_local_name, 'PNM')
          convert_image(localName, new_local_name, 'JPEG',
                        options=('QUALITY=100',))
          os.remove(localName)
          localName = new_local_name
        else:
          raise Exception('Unsupported file type')

      imageInfo = {'localName': localName, 'index': x}

      try:
        [K, T, llh] = get_kto(image, history=history)
        imageInfo['K_intrinsics'] = K
        imageInfo['transformation'] = T
        imageInfo['enu_origin'] = llh
      except: #some images have no camera metadata yet
        pass

      localImageList.append(imageInfo)

      ### if 1:
      ###   try: #not fully integrated yet
      ###     sift_gpu.create_sift(localName,
      ###                          os.path.splitext(localName)[0]+'.sift')
      ###   except:
      ###     pass

    # filenames = list(imageList.values_list('imageUrl'))
    # logger.info('The image list is %s' % filenames)

    self.update_state(state='PROCESSING',
                      meta={'stage': 'generate match points',
                            'processing_dir': processing_dir,
                            'total': len(imageList)})
    generateMatchPoints(map(lambda x: x['localName'], localImageList),
                        matchFilename, logger=logger,
                        executable=visualsfm_exe)

    # cameras = []
    # for image in imageList:
    #   try:
    #     [K, T, llh] = get_kto(image)
    #     cameras.append({'image': image.id, 'K': K, 'transformation': T,
    #                     'origin': llh})
    #   except:
    #     pass
    # origin = numpy.median(origin, axis=0)
    # origin = [-92.215197, 37.648858, 268.599]

    scene = models.Scene.objects.get(id=sceneId).history(history)
    origin = list(scene.origin)

    if scene.geolocated:
      self.update_state(state='PROCESSING',
                        meta={'stage': 'writing gcp points'})

      #find the middle origin, and make it THE origin
      data = [] #.name .llh_xyz
      for imageInfo in localImageList:
        try:
          r = imageInfo['transformation'][0:3, 0:3]
          t = imageInfo['transformation'][0:3, 3:]
          enu_point = -r.transpose().dot(t)
          if not numpy.array_equal(imageInfo['enu_origin'], origin):
            ecef = enu.enu2xyz(refLong=imageInfo['enu_origin'][0],
                               refLat=imageInfo['enu_origin'][1],
                               refH=imageInfo['enu_origin'][2],
                               #e=imageInfo['transformation'][0, 3],
                               #n=imageInfo['transformation'][1, 3],
                               #u=imageInfo['transformation'][2, 3])
                               e=enu_point[0],
                               n=enu_point[1],
                               u=enu_point[2])
            enu_point = enu.xyz2enu(refLong=origin[0], refLat=origin[1],
                                    refH=origin[2],
                                    X=ecef[0], Y=ecef[1], Z=ecef[2])
          # else:
          #   enu_point = imageInfo['transformation'][0:3, 3]
          dataBit = {'filename': imageInfo['localName'], 'xyz': enu_point}
          data.append(dataBit)

          #Make this a separate ingest process, making CAMERAS linked to the
          #images
          #data = arducopter.loadAdjTaggedMetadata(
          #    r'd:\visualsfm\2014-03-20 13-22-44_adj_tagged_images.txt')

          #Make this read the cameras from the DB instead
          writeGcpFile(data, gcpFilename)
        except: #some images may have no camera
          pass

    oid.lvcsOrigin = str(origin)
    oid.save()

    self.update_state(state='PROCESSING', meta={'stage': 'sparse SFM'})
    runSparse(matchFilename, sparce_filename, gcp=scene.geolocated,
              shared=True, logger=logger, executable=visualsfm_exe)

    self.update_state(state='FINALIZE',
                      meta={'stage': 'loading resulting cameras'})

    #prevent bundle2scene from getting confused and crashing
    sift_data = os.path.join(processing_dir, 'sift_data')
    os.mkdir(sift_data)
    for filename in glob(os.path.join(processing_dir, '*.mat')) + \
                    glob(os.path.join(processing_dir, '*.sift')):
      shutil.move(filename, sift_data)

    if scene.geolocated:
      #Create a uscene.xml for the geolocated case. All I want out of this is
      #the bounding box and gsd calculation.
      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=False, out_dir="")

      cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
      #cams.sort(key=lambda x:x.name)
      #Since the file names are frame_00001, etc... and you KNOW this order
      #is identical to localImageList, with some missing
      for cam in cams:
        frameName = cam.name #frame_00001, etc....
        imageInfo = filter(lambda x: x['localName'].endswith(frameName),
                           localImageList)[0]
        #I have to use endswith instead of == because visual sfm APPARENTLY
        #decides to take some liberty and make absolute paths relative
        image = imageList[imageInfo['index']].history(history)
        (k, r, t) = cam.krt(width=image.imageWidth, height=image.imageHeight)
        logger.info('Origin is %s' % str(origin))
        llh_xyz = enu.enu2llh(lon_origin=origin[0], lat_origin=origin[1],
                              h_origin=origin[2],
                              east=cam.translation_xyz[0],
                              north=cam.translation_xyz[1],
                              up=cam.translation_xyz[2])

        grcs = models.GeoreferenceCoordinateSystem.create(
            name='%s 0' % image.name,
            xUnit='d', yUnit='d', zUnit='m',
            location='SRID=4326;POINT(%0.15f %0.15f %0.15f)'
                     % (origin[0], origin[1], origin[2]),
            service_id=self.request.id)
        grcs.save()
        cs = models.CartesianCoordinateSystem.create(
            name='%s 1' % (image.name),
            service_id=self.request.id,
            xUnit='m', yUnit='m', zUnit='m')
        cs.save()

        transform = models.CartesianTransform.create(
            name='%s 1_0' % (image.name),
            service_id=self.request.id,
            rodriguezX=Point(*r[0, :]),
            rodriguezY=Point(*r[1, :]),
            rodriguezZ=Point(*r[2, :]),
            translation=Point(t[0][0], t[1][0], t[2][0]),
            coordinateSystem_from_id=grcs.id,
            coordinateSystem_to_id=cs.id)
        transform.save()

        camera = image.camera
        try:
          camera.update(service_id=self.request.id,
                        focalLengthU=k[0, 0], focalLengthV=k[1, 1],
                        principalPointU=k[0, 2], principalPointV=k[1, 2],
                        coordinateSystem=cs)
        except:
          camera = models.Camera.create(name=image.name,
                                        service_id=self.request.id,
                                        focalLengthU=k[0, 0],
                                        focalLengthV=k[1, 1],
                                        principalPointU=k[0, 2],
                                        principalPointV=k[1, 2],
                                        coordinateSystem=cs)
        camera.save()
        image.update(camera=camera)

      logger.info(str(cams[0]))
    else:
      from vsi.tools.natural_sort import natural_sorted
      from glob import glob
      from vsi.io.krt import Krt
      from voxel_globe.tools.camera import save_krt

      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=True, out_dir=processing_dir)
      #While the output dir is used for the b2s folders, uscene.xml is cwd
      #They are both set to processing_dir, so everything works out well

      aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
      #sort them naturally in case there are more than 99,999 files
      aligned_cams = natural_sorted(aligned_cams)

      if len(aligned_cams) != len(imageList):
        #Create a new image collection
        new_image_collection = models.ImageCollection.create(
            name="SFM Result Subset (%s)" % image_collection.name,
            service_id=self.request.id)
        # for image in image_collection.images.all():
        #   new_image_collection.images.add(image)
        new_image_collection.save()

        frames_keep = set(map(
            lambda x: int(os.path.splitext(x.split('_')[-2])[0]) - 1,
            aligned_cams))
        for frame_index in frames_keep:
          new_image_collection.images.add(imageList[frame_index])

        # frames_remove = set(xrange(len(imageList))) - frames_keep
        #
        # for remove_index in list(frames_remove):
        #   #The frame number refers to the nth image in the image collection,
        #   #so frame_00100.tif is the 100th image, starting the index at one
        #   #See local_name above
        #
        #   #remove the images sfm threw away
        #   new_image_collection.remove(imageList[remove_index])

        image_collection = new_image_collection
        frames_keep = list(frames_keep)
      else:
        frames_keep = xrange(len(aligned_cams))

      #---Update the camera models in the database.---
      for camera_index, frame_index in enumerate(frames_keep):
        krt = Krt.load(aligned_cams[camera_index])
        image = imageList[frame_index].history(history)
        save_krt(self.request.id, image, krt.k, krt.r, krt.t, [0, 0, 0],
                 srid=4326)

    #---Update scene information important for the no-metadata case---
    scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
    boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)
    scene.bbox_min = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[0]
    scene.bbox_max = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[1]

    #This is not a complete or good function really... but it will get me the
    #information I need.
    scene_dict = load_xml(scene_filename)
    block = scene_dict['block']
    scene.default_voxel_size = 'POINT(%f %f %f)' % \
        (float(block.at['dim_x']), float(block.at['dim_y']),
         float(block.at['dim_z']))
    scene.save()

  return oid.id
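# --- Illustrative sketch (not part of the task above) ------------------------
# Standalone restatement of the gcp-point math used in the geolocated branch,
# with hypothetical arguments. A KRT camera maps x_cam = R*x + T in its local
# ENU frame, so the camera center there is c = -R^T T; enu2xyz/xyz2enu then
# re-express c relative to the scene's common origin by way of ECEF.
def _rereference_camera_center(r, t, cam_origin_llh, scene_origin_llh):
  import voxel_globe.tools.enu as enu
  c = -r.transpose().dot(t)  # camera center in the camera's own ENU frame
  ecef = enu.enu2xyz(refLong=cam_origin_llh[0], refLat=cam_origin_llh[1],
                     refH=cam_origin_llh[2], e=c[0], n=c[1], u=c[2])
  return enu.xyz2enu(refLong=scene_origin_llh[0], refLat=scene_origin_llh[1],
                     refH=scene_origin_llh[2], X=ecef[0], Y=ecef[1], Z=ecef[2])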
def tiepoint_registration(self, image_collection_id, history=None):
  from PIL import Image
  import numpy as np

  from django.contrib.gis import geos

  import vpgl_adaptor
  from vsi.io.krt import Krt

  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt, save_krt
  from voxel_globe.tools.celery import Popen
  from voxel_globe.tools.xml_dict import load_xml

  self.update_state(state='INITIALIZE', meta={'id': image_collection_id})

  image_collection = models.ImageCollection.objects.get(
      id=image_collection_id).history(history)

  control_points = {}

  for fr, image in enumerate(image_collection.images.all()):
    image = image.history(history)
    tiepoint_ids = set([
        x for imagen in models.Image.objects.filter(objectId=image.objectId)
        for x in imagen.tiepoint_set.all().values_list('objectId',
                                                       flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(
          objectId=tiepoint_id, newerVersion=None).history(history)

      #demoware code hack!
      if 'error' in tiepoint.geoPoint.name.lower():
        continue

      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints': {}}
        control_points[control_point_id]['tiepoints'][fr] = \
            list(tiepoint.point)
        lla_xyz = models.ControlPoint.objects.get(
            objectId=control_point_id,
            newerVersion=None).history(history).point.coords
        control_points[control_point_id]['3d'] = [lla_xyz[x]
                                                  for x in [1, 0, 2]]

  #keep only the control points with more than one tiepoint
  control_points = {k: v for k, v in control_points.iteritems()
                    if len(v['tiepoints'].keys()) > 1}

  origin_yxz = np.mean([v['3d'] for k, v in control_points.iteritems()],
                       axis=0)

  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                  origin_yxz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = \
        vpgl_adaptor.convert_to_local_coordinates2(
            lvcs, *control_points[control_point]['3d'])

  images = {}

  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
    img.save(dummy_imagename) #Thank you stupid site file

    for fr, image in enumerate(image_collection.images.all()):
      (K, R, T, o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId
      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                'w') as fid:
        print >>fid, (("%0.18f " * 3 + "\n") * 3) % (
            K[0, 0], K[0, 1], K[0, 2],
            K[1, 0], K[1, 1], K[1, 2],
            K[2, 0], K[2, 1], K[2, 2])
        print >>fid, (("%0.18f " * 3 + "\n") * 3) % (
            R[0, 0], R[0, 1], R[0, 2],
            R[1, 0], R[1, 1], R[1, 2],
            R[2, 0], R[2, 1], R[2, 2])
        print >>fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2, 0])

    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')

    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in \
          enumerate(control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in \
            control_points[control_point_id]['tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n'
                    % (fr, tie_point[0], tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')

    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name,
           '-out', site_out_name], logger=logger).wait()

    #Read in the result, and load it into the points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate = {'id': [], 'x': [], 'y': [], 'z': []}
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate['id'].append(int(correspondence.at['id']))
      points_triangulate['x'].append(
          float(correspondence['corr_world_point'].at['X']))
      points_triangulate['y'].append(
          float(correspondence['corr_world_point'].at['Y']))
      points_triangulate['z'].append(
          float(correspondence['corr_world_point'].at['Z']))

    #Read the points out of the control points structure, but make sure they
    #are in the same order (check id == point_id)
    points_orig = {'x': [], 'y': [], 'z': []}
    for point_id in points_triangulate['id']:
      point = [v['lvcs'] for k, v in control_points.iteritems()
               if v['id'] == point_id]
      points_orig['x'].append(point[0][0])
      points_orig['y'].append(point[0][1])
      points_orig['z'].append(point[0][2])

    new_cameras = os.path.join(processing_dir, 'new_cameras')
    os.mkdir(new_cameras)

    #Make transformation
    transform, scale = vpgl_adaptor.compute_transformation(
        points_triangulate['x'], points_triangulate['y'],
        points_triangulate['z'],
        points_orig['x'], points_orig['y'], points_orig['z'],
        processing_dir, new_cameras)

    #calculate the new bounding box
    bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(
        list(image_collection.scene.bbox_min),
        list(image_collection.scene.bbox_max), transform)

    #calculate the new voxel size
    default_voxel_size = geos.Point(
        *(x * scale for x in image_collection.scene.default_voxel_size))

    scene = models.Scene.create(
        name=image_collection.scene.name + ' tiepoint registered',
        service_id=self.request.id,
        origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
        bbox_min=geos.Point(*bbox_min),
        bbox_max=geos.Point(*bbox_max),
        default_voxel_size=default_voxel_size,
        geolocated=True)
    scene.save()
    image_collection.scene = scene
    image_collection.save()

    for fr, image_id in images.iteritems():
      krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
      image = models.Image.objects.get(objectId=image_id, newerVersion=None)
      save_krt(self.request.id, image, krt.k, krt.r, krt.t,
               [origin_yxz[x] for x in [1, 0, 2]], srid=4326)
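# --- Illustrative sketch (not part of the task above) ------------------------
# The loop above writes each camera as K and R (3x3) then T (3x1), one row of
# whitespace-separated floats per line with a blank line between matrices.
# This hypothetical reader inverts that layout.
def _read_camera_txt(filename):
  import numpy as np
  values = np.loadtxt(filename).ravel()  # 9 (K) + 9 (R) + 3 (T) floats
  K = values[0:9].reshape(3, 3)
  R = values[9:18].reshape(3, 3)
  T = values[18:21].reshape(3, 1)
  return K, R, T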
def runVisualSfm(self, imageSetId, sceneId, cleanup=True):
  from voxel_globe.meta import models

  from os import environ as env
  from os.path import join as path_join
  import os
  import shutil
  import time

  from django.contrib.gis.geos import Point

  from .tools import writeNvm, writeGcpFile, generateMatchPoints, runSparse, \
                     readNvm

  import voxel_globe.tools
  from voxel_globe.tools.camera import get_kto, save_krt
  import voxel_globe.tools.enu as enu

  import numpy

  import boxm2_adaptor
  import boxm2_scene_adaptor

  from voxel_globe.tools.xml_dict import load_xml

  from voxel_globe.tools.image import convert_image

  from distutils.spawn import find_executable

  from vsi.iglob import glob as glob

  self.update_state(state='INITIALIZE', meta={'stage': 0})

  #Make main temp dir and cd into it
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:

    #Because visualsfm is so... bad, I need to copy it locally so I can
    #configure it
    visualsfm_exe = os.path.join(processing_dir, 'visualsfm')
    shutil.copy(find_executable('VisualSFM'), visualsfm_exe)
    with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
      fid.write('param_search_multiple_models 0\n')
      fid.write('param_use_siftgpu 2\n')

    matchFilename = path_join(processing_dir, 'match.nvm')
    sparce_filename = path_join(processing_dir, 'sparse.nvm')
    #This can NOT be changed in version 0.5.25
    gcpFilename = matchFilename + '.gcp'

    logger.debug('Task %s is processing in %s' % (self.request.id,
                                                  processing_dir))

    image_set = models.ImageSet.objects.get(id=imageSetId)
    imageList = image_set.images.all()

    ### if 1:
    ###   try: #Not fully integrated yet
    ###     sift_gpu = siftgpu.SiftGPU()
    ###   except:
    ###     pass

    localImageList = []
    for x in range(len(imageList)):
      #Copy the image locally
      image = imageList[x]
      self.update_state(state='INITIALIZE', meta={'stage': 'image fetch',
                                                  'i': x,
                                                  'total': len(imageList)})
      imageName = image.filename_path
      extension = os.path.splitext(imageName)[1].lower()
      localName = path_join(processing_dir,
                            'frame_%05d%s' % (x + 1, extension))
      #lncp(imageName, localName)
      #Stupid VisualSFM dereferences symlinks, breaking this
      shutil.copyfile(imageName, localName)

      #Convert the image if necessary
      if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
        self.update_state(state='INITIALIZE',
                          meta={'stage': 'image convert',
                                'i': x,
                                'total': len(imageList)})
        #Add code here to convert to jpg for visual sfm
        if extension in ['.png']:
          from PIL import Image
          image_temp = Image.open(localName)
          # if len(image_temp.mode) > 1: #Stupid visual sfm is picky :(
          #   new_local_name = os.path.splitext(localName)[0] + '.ppm'
          # else:
          #   new_local_name = os.path.splitext(localName)[0] + '.pgm'
          new_local_name = os.path.splitext(localName)[0] + '.jpg'
          ###ingest.convert_image(localName, new_local_name, 'PNM')
          convert_image(localName, new_local_name, 'JPEG',
                        options=('QUALITY=100',))
          os.remove(localName)
          localName = new_local_name
        else:
          raise Exception('Unsupported file type')

      imageInfo = {'localName': localName, 'index': x}

      try:
        [K, T, llh] = get_kto(image)
        imageInfo['K_intrinsics'] = K
        imageInfo['transformation'] = T
        imageInfo['enu_origin'] = llh
      except: #some images have no camera metadata yet
        pass

      localImageList.append(imageInfo)

      ### if 1:
      ###   try: #not fully integrated yet
      ###     sift_gpu.create_sift(localName,
      ###                          os.path.splitext(localName)[0]+'.sift')
      ###   except:
      ###     pass

    # filenames = list(imageList.values_list('image_url'))
    # logger.info('The image list is %s' % filenames)

    self.update_state(state='PROCESSING',
                      meta={'stage': 'generate match points',
                            'processing_dir': processing_dir,
                            'total': len(imageList)})
    pid = generateMatchPoints(map(lambda x: x['localName'], localImageList),
                              matchFilename, logger=logger,
                              executable=visualsfm_exe)

    old_mat = None
    old_sift = None
    #TODO: Replace with inotify to monitor directory
    while pid.poll() is None:
      mat = len(glob(os.path.join(processing_dir, '*.mat'), False))
      sift = len(glob(os.path.join(processing_dir, '*.sift'), False))
      if mat != old_mat or sift != old_sift:
        old_mat = mat
        old_sift = sift
        self.update_state(state='PROCESSING',
                          meta={'stage': 'generate match points',
                                'processing_dir': processing_dir,
                                'sift': sift, 'mat': mat,
                                'total': len(imageList)})
      time.sleep(5)

    # cameras = []
    # for image in imageList:
    #   try:
    #     [K, T, llh] = get_kto(image)
    #     cameras.append({'image': image.id, 'K': K, 'transformation': T,
    #                     'origin': llh})
    #   except:
    #     pass
    # origin = numpy.median(origin, axis=0)
    # origin = [-92.215197, 37.648858, 268.599]

    scene = models.Scene.objects.get(id=sceneId)
    origin = list(scene.origin)

    if scene.geolocated:
      self.update_state(state='PROCESSING',
                        meta={'stage': 'writing gcp points'})

      #find the middle origin, and make it THE origin
      data = [] #.name .llh_xyz
      for imageInfo in localImageList:
        try:
          r = imageInfo['transformation'][0:3, 0:3]
          t = imageInfo['transformation'][0:3, 3:]
          enu_point = -r.transpose().dot(t)
          if not numpy.array_equal(imageInfo['enu_origin'], origin):
            ecef = enu.enu2xyz(refLong=imageInfo['enu_origin'][0],
                               refLat=imageInfo['enu_origin'][1],
                               refH=imageInfo['enu_origin'][2],
                               #e=imageInfo['transformation'][0, 3],
                               #n=imageInfo['transformation'][1, 3],
                               #u=imageInfo['transformation'][2, 3])
                               e=enu_point[0],
                               n=enu_point[1],
                               u=enu_point[2])
            enu_point = enu.xyz2enu(refLong=origin[0], refLat=origin[1],
                                    refH=origin[2],
                                    X=ecef[0], Y=ecef[1], Z=ecef[2])
          # else:
          #   enu_point = imageInfo['transformation'][0:3, 3]
          dataBit = {'filename': imageInfo['localName'], 'xyz': enu_point}
          data.append(dataBit)

          #Make this a separate ingest process, making CAMERAS linked to the
          #images
          #data = arducopter.loadAdjTaggedMetadata(
          #    r'd:\visualsfm\2014-03-20 13-22-44_adj_tagged_images.txt')

          #Make this read the cameras from the DB instead
          writeGcpFile(data, gcpFilename)
        except: #some images may have no camera
          pass

    self.update_state(state='PROCESSING', meta={'stage': 'sparse SFM'})
    pid = runSparse(matchFilename, sparce_filename, gcp=scene.geolocated,
                    shared=True, logger=logger, executable=visualsfm_exe)
    pid.wait()

    self.update_state(state='FINALIZE',
                      meta={'stage': 'loading resulting cameras'})

    #prevent bundle2scene from getting confused and crashing
    sift_data = os.path.join(processing_dir, 'sift_data')
    os.mkdir(sift_data)
    for filename in glob(os.path.join(processing_dir, '*.mat'), False) + \
                    glob(os.path.join(processing_dir, '*.sift'), False):
      shutil.move(filename, sift_data)

    if scene.geolocated:
      #Create a uscene.xml for the geolocated case. All I want out of this is
      #the bounding box and gsd calculation.
      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=False, out_dir="")

      cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
      #cams.sort(key=lambda x:x.name)
      #Since the file names are frame_00001, etc... and you KNOW this order
      #is identical to localImageList, with some missing
      camera_set = models.CameraSet(name="Visual SFM Geo %s"
                                         % image_set.name,
                                    service_id=self.request.id,
                                    images_id=imageSetId)
      camera_set.save()

      for cam in cams:
        frameName = cam.name #frame_00001, etc....
        imageInfo = filter(lambda x: x['localName'].endswith(frameName),
                           localImageList)[0]
        #I have to use endswith instead of == because visual sfm APPARENTLY
        #decides to take some liberty and make absolute paths relative
        image = imageList[imageInfo['index']]
        (k, r, t) = cam.krt(width=image.image_width,
                            height=image.image_height)
        t = t.flatten()
        camera = save_krt(self.request.id, image, k, r, t, origin, srid=4326)
        camera_set.cameras.add(camera)
    else:
      from vsi.tools.natural_sort import natural_sorted
      from vsi.io.krt import Krt

      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=True, out_dir=processing_dir)
      #While the output dir is used for the b2s folders, uscene.xml is cwd
      #They are both set to processing_dir, so everything works out well

      aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
      #sort them naturally in case there are more than 99,999 files
      aligned_cams = natural_sorted(aligned_cams)

      if len(aligned_cams) != len(imageList):
        #Create a new image set
        new_image_set = models.ImageSet(
            name="SFM Result Subset (%s)" % image_set.name,
            service_id=self.request.id)
        # for image in image_set.images.all():
        #   new_image_set.images.add(image)
        new_image_set.save()

        frames_keep = set(map(
            lambda x: int(os.path.splitext(x.split('_')[-2])[0]) - 1,
            aligned_cams))
        for frame_index in frames_keep:
          new_image_set.images.add(imageList[frame_index])

        # frames_remove = set(xrange(len(imageList))) - frames_keep
        #
        # for remove_index in list(frames_remove):
        #   #The frame number refers to the nth image in the image set,
        #   #so frame_00100.tif is the 100th image, starting the index at one
        #   #See local_name above
        #
        #   #remove the images sfm threw away
        #   new_image_set.remove(imageList[remove_index])

        image_set = new_image_set
        frames_keep = list(frames_keep)
      else:
        frames_keep = xrange(len(aligned_cams))

      camera_set = models.CameraSet(name="Visual SFM %s" % image_set.name,
                                    service_id=self.request.id,
                                    images_id=imageSetId)
      camera_set.save()

      #---Update the camera models in the database.---
      for camera_index, frame_index in enumerate(frames_keep):
        krt = Krt.load(aligned_cams[camera_index])
        image = imageList[frame_index]
        camera = save_krt(self.request.id, image, krt.k, krt.r, krt.t,
                          [0, 0, 0], srid=4326)
        camera_set.cameras.add(camera)

    #---Update scene information important for the no-metadata case---
    scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
    boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)
    scene.bbox_min = Point(*boxm_scene.bbox[0])
    scene.bbox_max = Point(*boxm_scene.bbox[1])

    #This is not a complete or good function really... but it will get me the
    #information I need.
    scene_dict = load_xml(scene_filename)
    block = scene_dict['block']
    scene.default_voxel_size = Point(float(block.at['dim_x']),
                                     float(block.at['dim_y']),
                                     float(block.at['dim_z']))
    scene.save()
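# --- Illustrative sketch (not part of the task above) ------------------------
# How frames_keep recovers a zero-based image index from an aligned camera
# filename. Assumes bundle2scene names its cams_krt outputs after the input
# frames, e.g. the hypothetical 'frame_00100.jpg_cam.txt' used below.
def _frame_index(cam_path):
  import os
  stem = cam_path.split('_')[-2]             # '00100.jpg' in the example
  return int(os.path.splitext(stem)[0]) - 1  # frame numbers start at one

# _frame_index('.../frame_00100.jpg_cam.txt') == 99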