def run(self):
  """Parse arducopter flight metadata and save a camera per image.

  Finds the single ``*_adj_tagged_images.txt`` file in the ingest
  directory, derives date/time from its filename, averages all per-frame
  lat/lon/alt positions to get the scene origin, and writes a
  position-only KRT camera (identity rotation, zero translation,
  principal point at image center) for every frame it can match.
  """
  from vsi.iglob import glob
  from voxel_globe.tools.camera import save_krt
  from .tools import load_arducopter_metadata
  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  metadata_filename = glob(
      os.path.join(self.ingest_dir, '*_adj_tagged_images.txt'), False)
  # Only logs on a count mismatch; processing continues with the first hit.
  if not len(metadata_filename) == 1:
    logger.error(
        'Only one metadata file should have been found, '
        'found %d instead', len(metadata_filename))
  metadata_filename = metadata_filename[0]
  date = time_of_day = ''
  # Filename convention appears to be "<date> <time>_...": split on the
  # space, then strip everything after the first underscore in the time.
  try:
    (date, time_of_day) = os.path.split(metadata_filename)[1].split(' ')
    time_of_day = time_of_day.split('_', 1)[0]
  except:  # any unexpected filename shape leaves date/time empty
    time_of_day = ''
  metadata = load_arducopter_metadata(metadata_filename)
  #determine the origin via average
  origin_xyz = np.mean(np.array(map(lambda x: x.llh_xyz, metadata)), 0)
  # srid 7428 — presumably the EGM96-based reference used elsewhere in this
  # file; TODO confirm.
  self.parse_json(srid=7428, date=date, time_of_day=time_of_day,
                  origin_xyz=origin_xyz)
  for meta in metadata:
    try:
      # Match metadata rows to images by the "Frame <name>" naming scheme.
      img = self.image_collection.images.get(
          name__icontains='Frame %s' % meta.filename)
      k = np.eye(3)
      k[0, 2] = img.imageWidth / 2
      k[1, 2] = img.imageHeight / 2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = meta.llh_xyz
      save_krt(self.task.request.id, img, k, r, t, origin, srid=self.srid)
    except Exception as e:
      # A frame without a matching image is logged and skipped.
      logger.warning('%s', e)
      logger.error('Could not match metadata entry for %s' % meta.filename)
  self.save_scene()
def run(self):
  """Ingest KRT camera files that accompany the uploaded images.

  Scans every file in the ingest directory, tries to parse each
  small-enough file as a KRT camera, matches parsed cameras to images via
  the JSON config, and saves one camera per matched image using the
  session origin (``self.origin_xyz`` from ``parse_json``).
  """
  from glob import glob
  from vsi.io.krt import Krt as KrtCamera
  from voxel_globe.tools.camera import save_krt
  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  self.parse_json()
  metadata_filenames = glob(os.path.join(self.ingest_dir, '*'))
  krts = {}
  for metadata_filename in metadata_filenames:
    # Skip anything too large to plausibly be a KRT text file.
    # BUG FIX: this previously read `Krt.MAX_SIZE`, but the class is
    # imported as `KrtCamera` in this scope.
    if os.stat(metadata_filename).st_size <= KrtCamera.MAX_SIZE:
      try:
        krt_1 = KrtCamera.load(metadata_filename)
        krts[os.path.basename(metadata_filename)] = krt_1
      except Exception:
        #Hopefully non-krts throw an exception when loading; keep the
        #traceback at debug level so genuine parse failures are findable.
        import traceback as tb
        logger.debug('Non-KRT parsed: %s', tb.format_exc())
  matches = match_images(self.image_collection.images.all(), krts.keys(),
                         self.json_config)
  for match in matches:
    krt_1 = krts[match]
    logger.debug('%s matched to %s', match, matches[match].original_filename)
    save_krt(self.task.request.id, matches[match], krt_1.k, krt_1.r,
             krt_1.t, self.origin_xyz)
  self.save_scene()
def run(self):
  """Default metadata parser: give every image an uncalibrated camera.

  Each image receives identity rotation, zero translation, and intrinsics
  whose principal point sits at the image center.  After saving, the scene
  is flagged as not geolocated since no real pose data exists.
  """
  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  self.parse_json()

  #Per-image attributes resolved from the upload's JSON config.
  image_attributes = match_attributes(self.image_collection.images.all(),
                                      self.json_config)

  rotation = np.eye(3)
  translation = np.zeros(3)
  local_origin = [0, 0, 0]

  for image in self.image_collection.images.all():
    #Fresh intrinsics each time; only the principal point varies per image.
    intrinsics = np.eye(3)
    intrinsics[0, 2] = image.imageWidth / 2
    intrinsics[1, 2] = image.imageHeight / 2
    save_krt(self.task.request.id, image, intrinsics, rotation, translation,
             local_origin, srid=self.srid,
             attributes=image_attributes.get(image.original_filename, {}))

  self.save_scene()
  #Placeholder cameras carry no geolocation information.
  self.image_collection.scene.geolocated = False
  self.image_collection.scene.save()
def run(self):
  """Ingest KRT camera files that accompany the uploaded images.

  Scans every file in the ingest directory, tries to parse each
  small-enough file as a KRT camera, matches parsed cameras to images via
  the JSON config, and saves one camera per matched image using the
  session origin (``self.origin_xyz`` from ``parse_json``).
  """
  from glob import glob
  from vsi.io.krt import Krt as KrtCamera
  from voxel_globe.tools.camera import save_krt
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.parse_json()
  metadata_filenames = glob(os.path.join(self.ingest_dir, '*'))
  krts = {}
  for metadata_filename in metadata_filenames:
    # Skip anything too large to plausibly be a KRT text file.
    # BUG FIX: this previously read `Krt.MAX_SIZE`, but the class is
    # imported as `KrtCamera` in this scope.
    if os.stat(metadata_filename).st_size <= KrtCamera.MAX_SIZE:
      try:
        krt_1 = KrtCamera.load(metadata_filename)
        krts[os.path.basename(metadata_filename)] = krt_1
      except Exception:
        #Hopefully non-krts throw an exception when loading; keep the
        #traceback at debug level so genuine parse failures are findable.
        import traceback as tb
        logger.debug('Non-KRT parsed: %s', tb.format_exc())
  matches = match_images(self.image_collection.images.all(), krts.keys(),
                         self.json_config)
  for match in matches:
    krt_1 = krts[match]
    logger.debug('%s matched to %s', match, matches[match].original_filename)
    save_krt(self.task.request.id, matches[match], krt_1.k, krt_1.r,
             krt_1.t, self.origin_xyz)
  self.save_scene()
def run(self): from voxel_globe.tools.camera import save_krt #You add the rest to create your brand new parser! That's it! self.task.update_state(state='Processing', meta={'stage':'metadata'}) self.parse_json(srid=5467) #Set some defaults for parsing config file for image in self.image_collection.images.all(): save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid) self.save_scene()
def run(self):
  """Parse AngelFire ``*.pos`` metadata files and save per-image cameras.

  Derives date/time from the first metadata filename's timestamp prefix,
  averages all lat/lon/alt rows for the scene origin, then matches each
  image to its ``...00-VIS.pos`` file by name and writes a position-only
  KRT camera (identity rotation, principal point at image center).
  """
  from vsi.iglob import glob
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  metadata_filenames = glob(os.path.join(self.ingest_dir, '*.pos'), False)
  metadata_filenames = sorted(metadata_filenames, key=lambda s:s.lower())
  # Lower-cased basenames, index-aligned with metadata_filenames, for the
  # case-insensitive name matching below.
  metadata_basenames = map(lambda x:os.path.split(x)[-1].lower(),
                           metadata_filenames)
  for metadata_filename in metadata_filenames:
    # try:
    # Filename prefix is a timestamp: YYYYMMDDhhmmss-...
    timestamp = os.path.split(metadata_filenames[0])[1].split('-')[0]
    date = timestamp[0:4]+'-'+timestamp[4:6]+'-'+timestamp[6:8]
    time_of_day = timestamp[8:10]+':'+timestamp[10:12]+':'+timestamp[12:14]
    break #on first success
    # except: pass
  # NOTE(review): if metadata_filenames is empty, `date`/`time_of_day` are
  # never bound and parse_json below raises NameError — confirm whether an
  # empty ingest dir is possible here.
  llhs_xyz=[]
  for metadata_filename in metadata_filenames:
    # First CSV line holds the position; columns 5,4,6 are lon,lat,alt.
    with open(metadata_filename, 'r') as fid:
      metadata = fid.readline().split(',')
    llh_xyz = [float(metadata[5]), float(metadata[4]), float(metadata[6]) \
        *AngelFire.AF_DATA[self.AF_VERSION]['altitude_conversion']]
    llhs_xyz.append(llh_xyz)
  #Scene origin is the average of all positions.
  origin_xyz = np.mean(np.array(llhs_xyz), 0)
  self.parse_json(date=date, time_of_day=time_of_day, origin_xyz=origin_xyz)
  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)
  for image in self.image_collection.images.all():
    filename = image.original_filename
    # Image "xxxx<6 chars>.ext" maps to metadata "xxxx00-VIS.pos"
    # (last six characters of the stem replaced).
    metadata_filename_desired = (os.path.splitext(
        os.path.split(filename)[-1])[0][0:-6]+'00-VIS.pos').lower()
    try:
      metadata_index = metadata_basenames.index(metadata_filename_desired)
      metadata_filename = metadata_filenames[metadata_index]
      k = np.eye(3)
      k[0,2] = image.imageWidth/2
      k[1,2] = image.imageHeight/2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = llhs_xyz[metadata_index]
      save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(image.original_filename,
                                                  {}))
    except Exception as e:
      # Images without a matching .pos file are silently skipped.
      pass
  self.save_scene()
def run(self):
  """Template metadata parser (with config attributes) — fill in cameras.

  NOTE(review): ``k``, ``r``, ``t`` and ``origin`` are not defined in this
  body; as written the save_krt call raises NameError.  This appears
  intentional — the implementer is expected to compute them.
  """
  #You add the rest to create your brand new parser! That's it!
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  # srid 5467 appears to be the placeholder spatial reference id used by
  # the other parser templates in this file — TODO confirm.
  self.parse_json(srid=5467)
  #Set some defaults for parsing config file
  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)
  for image in self.image_collection.images.all():
    # k, r, t, origin must be supplied by the concrete parser.
    save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
             attributes=matching_attributes.get(image.original_filename, {}))
  self.save_scene()
def run(self):
  """Parse arducopter flight metadata and save a camera per image.

  Same flow as the plain arducopter parser, but also attaches per-image
  attributes resolved from the JSON config: find the single
  ``*_adj_tagged_images.txt`` file, derive date/time from its filename,
  average per-frame positions for the origin, and write a position-only
  KRT camera for each matched frame.
  """
  from vsi.iglob import glob
  from .tools import load_arducopter_metadata
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  metadata_filename = glob(os.path.join(self.ingest_dir,
                                        '*_adj_tagged_images.txt'), False)
  # Only logs on a count mismatch; processing continues with the first hit.
  if not len(metadata_filename) == 1:
    logger.error('Only one metadata file should have been found, '
                 'found %d instead', len(metadata_filename))
  metadata_filename = metadata_filename[0]
  date = time_of_day = ''
  # Filename convention appears to be "<date> <time>_...".
  try:
    (date, time_of_day) = os.path.split(metadata_filename)[1].split(' ')
    time_of_day = time_of_day.split('_', 1)[0]
  except:  # any unexpected filename shape leaves date/time empty
    time_of_day = ''
  metadata = load_arducopter_metadata(metadata_filename)
  #determine the origin via average
  origin_xyz = np.mean(np.array(map(lambda x:x.llh_xyz, metadata)), 0)
  # srid 7428 — presumably the EGM96-based reference used elsewhere in this
  # file; TODO confirm.
  self.parse_json(srid=7428, date=date, time_of_day=time_of_day,
                  origin_xyz=origin_xyz)
  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)
  for meta in metadata:
    try:
      # Match metadata rows to images by the "Frame <name>" naming scheme.
      img = self.image_collection.images.get(
          name__icontains='Frame %s' % meta.filename)
      k = np.eye(3)
      k[0,2] = img.imageWidth/2
      k[1,2] = img.imageHeight/2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = meta.llh_xyz
      save_krt(self.task.request.id, img, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(img.original_filename, {}))
    except Exception as e:
      # A frame without a matching image is logged and skipped.
      logger.warning('%s', e)
      logger.error('Could not match metadata entry for %s' % meta.filename)
  self.save_scene()
def run(self): from voxel_globe.tools.camera import save_krt #You add the rest to create your brand new parser! That's it! self.task.update_state(state='Processing', meta={'stage': 'metadata'}) self.parse_json(srid=5467) #Set some defaults for parsing config file for image in self.image_collection.images.all(): save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid) self.save_scene()
def run(self):
  """Fallback parser: write a placeholder camera for each image.

  Every image gets identity rotation, zero translation and center-principal
  -point intrinsics; the scene is then marked as not geolocated.
  """
  from voxel_globe.tools.camera import save_krt
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.parse_json()

  no_rotation = np.eye(3)
  no_translation = np.zeros(3)
  dummy_origin = [0, 0, 0]

  for image in self.image_collection.images.all():
    #Build intrinsics fresh for each image.
    calibration = np.eye(3)
    calibration[0, 2] = image.imageWidth / 2
    calibration[1, 2] = image.imageHeight / 2
    save_krt(self.task.request.id, image, calibration, no_rotation,
             no_translation, dummy_origin, srid=self.srid)

  self.save_scene()
  #No real pose data was available, so the scene is not geolocated.
  self.image_collection.scene.geolocated = False
  self.image_collection.scene.save()
def run(self):
  """Template metadata parser (with config attributes) — fill in cameras.

  NOTE(review): ``k``, ``r``, ``t`` and ``origin`` are not defined in this
  body; as written the save_krt call raises NameError.  This appears
  intentional — the implementer is expected to compute them.
  """
  #You add the rest to create your brand new parser! That's it!
  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  # srid 5467 appears to be the placeholder spatial reference id used by
  # the other parser templates in this file — TODO confirm.
  self.parse_json(srid=5467)
  #Set some defaults for parsing config file
  matching_attributes = match_attributes(
      self.image_collection.images.all(), self.json_config)
  for image in self.image_collection.images.all():
    # k, r, t, origin must be supplied by the concrete parser.
    save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
             attributes=matching_attributes.get(
                 image.original_filename, {}))
  self.save_scene()
def run(self):
  """Default metadata parser: one uncalibrated camera per image.

  Cameras use identity rotation, zero translation and a principal point at
  the image center; attributes come from the upload's JSON config.  The
  resulting scene is flagged as not geolocated.
  """
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.parse_json()

  attrs = match_attributes(self.image_collection.images.all(),
                           self.json_config)

  identity = np.eye(3)
  zero_t = np.zeros(3)
  origin_lvcs = [0, 0, 0]

  for img in self.image_collection.images.all():
    K = np.eye(3)
    K[0, 2], K[1, 2] = img.imageWidth / 2, img.imageHeight / 2
    save_krt(self.task.request.id, img, K, identity, zero_t, origin_lvcs,
             srid=self.srid,
             attributes=attrs.get(img.original_filename, {}))

  self.save_scene()
  scene = self.image_collection.scene
  #Placeholder cameras carry no geolocation.
  scene.geolocated = False
  scene.save()
def run(self):
  """Template parser for the image-set/camera-set ingest flow.

  NOTE(review): ``k``, ``r``, ``t`` and ``origin`` are not defined in this
  body, matching the other parser templates in this file — as written the
  save_krt call raises NameError until an implementation defines them.
  """
  #You add the rest to create your brand new parser! That's it!
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.create_camera_set()
  # srid 5467 appears to be the placeholder spatial reference id used by
  # the other parser templates in this file — TODO confirm.
  self.parse_json(srid=5467) #Set some defaults for parsing config file
  matching_attributes = match_attributes(self.image_set.images.all(),
                                         self.json_config)
  for image in self.image_set.images.all():
    # save_krt returns the created camera object here, which is then
    # registered in this task's camera set.
    camera = save_krt(self.task.request.id, image, k, r, t, origin,
                      srid=self.srid,
                      attributes=matching_attributes.get(
                          os.path.basename(image.filename_path), {}))
    self.camera_set.cameras.add(camera)
  self.save_scene()
def run(self):
  """Fallback parser: placeholder cameras for every image.

  Writes identity-rotation, zero-translation cameras with the principal
  point at each image's center, then marks the scene non-geolocated.
  """
  from voxel_globe.tools.camera import save_krt
  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  self.parse_json()

  r, t, origin = np.eye(3), np.zeros(3), [0, 0, 0]

  for img in self.image_collection.images.all():
    k = np.eye(3)
    k[0, 2] = img.imageWidth / 2
    k[1, 2] = img.imageHeight / 2
    save_krt(self.task.request.id, img, k, r, t, origin, srid=self.srid)

  self.save_scene()
  scene = self.image_collection.scene
  #No pose metadata exists for these images.
  scene.geolocated = False
  scene.save()
def run(self):
  """Ingest KRT camera files and register each camera in the camera set.

  Scans the ingest directory for parseable KRT files, matches them to
  images in the image set via the JSON config, saves a camera per match
  (with config-derived attributes) and adds it to ``self.camera_set``.
  """
  from glob import glob
  from vsi.io.krt import Krt as KrtCamera
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.create_camera_set()
  self.parse_json()
  metadata_filenames = glob(os.path.join(self.ingest_dir, '*'))
  krts = {}
  for metadata_filename in metadata_filenames:
    # Skip anything too large to plausibly be a KRT text file.
    # BUG FIX: this previously read `Krt.MAX_SIZE`, but the class is
    # imported as `KrtCamera` in this scope.
    if os.stat(metadata_filename).st_size <= KrtCamera.MAX_SIZE:
      try:
        krt_1 = KrtCamera.load(metadata_filename)
        krts[os.path.basename(metadata_filename)] = krt_1
      except Exception:
        #Hopefully non-krts throw an exception when loading
        import traceback as tb
        logger.debug('Non-KRT parsed: %s', tb.format_exc())
  matches = match_images(self.image_set.images.all(), krts.keys(),
                         self.json_config)
  matching_attributes = match_attributes(self.image_set.images.all(),
                                         self.json_config)
  # (removed unused locals `cameras` and `attributes` from the original)
  for match in matches:
    krt_1 = krts[match]
    logger.debug('%s matched to %s', match, matches[match].filename_path)
    camera = save_krt(self.task.request.id, matches[match], krt_1.k,
                      krt_1.r, krt_1.t, self.origin_xyz, srid=self.srid,
                      attributes=matching_attributes.get(
                          os.path.basename(matches[match].filename_path),
                          {}))
    self.camera_set.cameras.add(camera)
  self.save_scene()
def ingest_data(self, uploadSession_id, imageDir):
  ''' task for the ingest route, to ingest the data an upload sessions
  points to

  Creates an ImageCollection for the session, converts every uploaded jpg
  to a zoomify pyramid (via the external `vips` tool), registers each image
  with the image server URLs, then matches arducopter metadata rows to the
  created images and saves a position-only camera per match.  Finally a
  Scene is created at the average GPS position and the upload session is
  deleted.
  '''
  import voxel_globe.ingest.models as IngestModels
  from .tools import loadAdjTaggedMetadata
  import numpy
  from voxel_globe.tools.camera import save_krt
  from PIL import Image
  uploadSession = IngestModels.UploadSession.objects.get(id=uploadSession_id);
  #directories = uploadSession.directory.all();
  #imageDirectory = directories.filter(name='image')
  #metaDirectory = directories.filter(name='meta')
  metadataFilename = glob(os.path.join(imageDir, '*',
                                       '*_adj_tagged_images.txt'), False);
  # Only logs on a count mismatch; processing continues regardless.
  if not len(metadataFilename) == 1:
    logger.error('Only one metadatafile should have been found, '
                 'found %d instead', len(metadataFilename));
  # Filename convention appears to be "<day> <time>_..."; on any failure
  # (including no metadata file at all) fall back to os.devnull and 'NYA'
  # ("not yet available" — TODO confirm) placeholders.
  try:
    metadataFilename = metadataFilename[0]
    (day, timeOfDay) = os.path.split(metadataFilename)[1].split(' ');
    timeOfDay = timeOfDay.split('_', 1)[0];
  except:
    metadataFilename = os.devnull;
    day = 'NYA'
    timeOfDay = 'NYA'
  imageCollection = voxel_globe.meta.models.ImageCollection.create(
      name="Arducopter Upload %s %s %s (%s)" % (uploadSession.name, day,
                                                timeOfDay, uploadSession_id),
      service_id = self.request.id);
  imageCollection.save();
  for d in glob(os.path.join(imageDir, '*'+os.path.sep), False):
    files = glob(os.path.join(d, '*.jpg'), False);
    files.sort()
    for f in files:
      self.update_state(state='PROCESSING',
                        meta={'stage':'File %s of %d' % (f, len(files))})
      logger.debug('Processing %s of %s', f, len(files))
      # Build the zoomify tile pyramid next to the source jpg.
      zoomifyName = f[:-4] + '_zoomify'
      pid = Popen(['vips', 'dzsave', f, zoomifyName, '--layout', 'zoomify'])
      pid.wait();
      #convert the slashes to URL slashes
      relFilePath = urllib.pathname2url(
          os.path.relpath(f, env['VIP_IMAGE_SERVER_ROOT']));
      basename = os.path.split(f)[-1]
      relZoomPath = urllib.pathname2url(
          os.path.relpath(zoomifyName, env['VIP_IMAGE_SERVER_ROOT']));
      image = Image.open(f)
      # Map bit depth to the pixel-format code; `image.bits` / `image.layers`
      # are PIL internals — presumably set for jpgs; TODO confirm.  NOTE:
      # pixel_format is unbound if none of these cases match.
      if image.bits == 8:
        pixel_format = 'b';
      if image.bits == 16:
        pixel_format = 's';
      if image.bits == 32:
        if image.mode == "I":
          pixel_format = 'i';
        elif image.mode == "F":
          pixel_format = 'f'
      img = voxel_globe.meta.models.Image.create(
          name="Arducopter Upload %s (%s) Frame %s" % (uploadSession.name,
              uploadSession_id, basename),
          imageWidth=image.size[0], imageHeight=image.size[1],
          numberColorBands=image.layers,
          pixelFormat=pixel_format,
          fileFormat='zoom',
          imageUrl='%s://%s:%s/%s/%s/' % (env['VIP_IMAGE_SERVER_PROTOCOL'],
              env['VIP_IMAGE_SERVER_HOST'], env['VIP_IMAGE_SERVER_PORT'],
              env['VIP_IMAGE_SERVER_URL_PATH'], relZoomPath),
          originalImageUrl='%s://%s:%s/%s/%s' % (
              env['VIP_IMAGE_SERVER_PROTOCOL'], env['VIP_IMAGE_SERVER_HOST'],
              env['VIP_IMAGE_SERVER_PORT'], env['VIP_IMAGE_SERVER_URL_PATH'],
              relFilePath),
          service_id = self.request.id);
      img.save();
      imageCollection.images.add(img);
  self.update_state(state='Processing', meta={'stage':'metadata'})
  metadata = loadAdjTaggedMetadata(metadataFilename);
  for meta in metadata:
    try:
      # Match metadata rows to images by the "Frame <name>" naming scheme.
      img = imageCollection.images.get(
          name__icontains='Frame %s'%meta.filename)
      k = numpy.eye(3);
      k[0,2] = img.imageWidth/2;
      k[1,2] = img.imageHeight/2;
      r = numpy.eye(3);
      t = [0, 0, 0];
      origin = meta.llh_xyz;
      save_krt(self.request.id, img, k, r, t, origin, srid=7428);
    except Exception as e:
      logger.warning('%s', e)
      logger.error('Could not match metadata entry for %s' % meta.filename)
  # Scene origin is the average of all frame positions.
  averageGps = numpy.mean(numpy.array(map(lambda x:x.llh_xyz, metadata)), 0);
  voxel_globe.meta.models.Scene.create(
      name="Arducopter origin %s (%s)" % (uploadSession.name,
                                          uploadSession_id),
      service_id = self.request.id,
      origin='SRID=%d;POINT(%0.12f %0.12f %0.12f)' % \
          (7428, averageGps[0], averageGps[1], averageGps[2])).save()
  uploadSession.delete()
def run(self):
  """Extract camera positions from EXIF GPS tags of the uploaded images.

  For every image in the collection this reads the EXIF GPS IFD
  (tag 34853) to recover latitude/longitude/altitude, writes a
  position-only KRT camera at that location, and finally sets
  ``self.origin_xyz`` to the mean position — preferring images with a real
  (nonzero) lat/lon fix and falling back to all parsed positions.
  """
  from vsi.io.image import PilReader
  from vsi.iglob import glob
  from vsi.tools import Try
  from .tools import exif_date_time_parse
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  self.parse_json()
  gpsList = []   #positions with a nonzero lat/lon fix
  gpsList2 = []  #every parsed position; fallback when gpsList is unusable
  k = np.eye(3)
  r = np.eye(3)
  t = [0, 0, 0]
  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)
  for image in self.image_collection.images.all():
    filename = os.path.join(self.ingest_dir, image.original_filename)
    try:
      img = PilReader(filename, True)
      with Try():
        exifTags = img.object._getexif()
        gps = exifTags[34853]  #EXIF GPS IFD
      if self.date == '':
        #Try the usual date tags in preference order: DateTimeOriginal
        #(36867), DateTime (306), DateTimeDigitized (36868).
        with Try():
          try:
            self.date, self.time_of_day = exif_date_time_parse(
                exifTags[36867])
          except:
            try:
              self.date, self.time_of_day = exif_date_time_parse(
                  exifTags[306])
            except:
              try:
                self.date, self.time_of_day = exif_date_time_parse(
                    exifTags[36868])
              except:
                pass
      try:
        #GPSLatitude: three (numerator, denominator) rationals for
        #degrees/minutes/seconds.
        latitude = float(gps[2][0][0])/gps[2][0][1] + \
                   float(gps[2][1][0])/gps[2][1][1]/60.0 + \
                   float(gps[2][2][0])/gps[2][2][1]/3600.0
        if gps[1] == 'N':
          pass
        elif gps[1] == 'S':
          latitude *= -1
        else:
          latitude *= 0  #unknown hemisphere: treat as no fix
      except:
        latitude = 0
      try:
        longitude = float(gps[4][0][0])/gps[4][0][1] + \
                    float(gps[4][1][0])/gps[4][1][1]/60.0 + \
                    float(gps[4][2][0])/gps[4][2][1]/3600.0
        if gps[3] == 'W':
          longitude *= -1
        elif gps[3] == 'E':
          pass
        else:
          longitude *= 0  #unknown hemisphere: treat as no fix
      except:
        longitude = 0
      try:
        #GPSAltitudeRef (tag 5): 0/absent means above sea level.
        #if positive, assume no flag 5 == positive
        if 5 not in gps or gps[5] == '\x00':
          altitude = float(gps[6][0])/gps[6][1]
        else:
          #BUG FIX: ref flag 1 means below sea level, so the altitude must
          #be negated — both branches used to be identical.
          altitude = -float(gps[6][0])/gps[6][1]
      except:
        altitude = 0
      #Untested code, because I don't have images with this tag!
      try:
        if gps[18] == 'WGS-84':
          #http://www.cipa.jp/std/documents/e/DC-008-2010_E.pdf
          self.srid = 4326
        elif gps[18] == 'EGM96':  #I'm guessing here?
          self.srid = 7428  #EGM 96
      except:
        pass
      origin = [longitude, latitude, altitude]
      if not any(np.array(origin[0:2]) == 0):
        gpsList.append(origin)
      gpsList2.append(origin)
      k[0,2] = image.imageWidth/2
      k[1,2] = image.imageHeight/2
      save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(image.original_filename,
                                                  {}))
    except Exception as e:
      #Unreadable images are skipped; log instead of silently swallowing.
      logger.debug('Skipping %s: %s', filename, e)
  logger.error(gpsList)
  logger.error(gpsList2)
  try:
    self.origin_xyz = np.mean(np.array(gpsList), 0)
    #BUG FIX: this check previously referenced the undefined name
    #`averageGps`, so the NameError always triggered the fallback branch.
    #An empty gpsList yields a scalar mean here, so len() still raises and
    #falls back as intended.
    if len(self.origin_xyz) != 3:
      raise ValueError
  except:
    self.origin_xyz = np.mean(np.array(gpsList2), 0)
  logger.error(self.origin_xyz)
  self.save_scene()
def run(self):
  """Parse CLIF ``*.txt`` metadata files and save per-image cameras.

  Derives date/time from the first parseable metadata file (GPS week +
  seconds since the 1980-01-06 GPS epoch), averages all lat/lon/alt rows
  for the scene origin, then matches each image to its metadata file by
  name and writes a position-only KRT camera.
  """
  from vsi.iglob import glob
  from .tools import split_clif
  self.task.update_state(state='Processing', meta={'stage':'metadata'})
  metadata_filenames = glob(os.path.join(self.ingest_dir, '*.txt'), False)
  metadata_filenames = sorted(metadata_filenames, key=lambda s:s.lower())
  # Lower-cased basenames, index-aligned with metadata_filenames, for the
  # case-insensitive name matching below.
  metadata_basenames = map(lambda x:os.path.basename(x).lower(),
                           metadata_filenames)
  date = ''
  time_of_day = ''
  for metadata_filename in metadata_filenames:
    #Loop through until one succeeds
    try:
      with open(metadata_filename, 'r') as fid:
        data = fid.readline().split(',')
      # Columns 6/7 are IMU seconds-of-week and GPS week number;
      # 1980-01-06 is the GPS time epoch.
      imu_time = float(data[6])
      imu_week = int(data[7])
      timestamp = datetime(1980, 1, 6) + timedelta(weeks=imu_week,
                                                   seconds=imu_time)
      date = '%04d-%02d-%02d' % (timestamp.year, timestamp.month,
                                 timestamp.day)
      time_of_day = '%02d:%02d:%02d.%06d' % (timestamp.hour,
                                             timestamp.minute,
                                             timestamp.second,
                                             timestamp.microsecond)
      break #Break on first success
    except:
      pass
  #Kinda inefficient, kinda don't care
  llhs_xyz=[]
  for metadata_filename in metadata_filenames:
    # First CSV line holds the position; columns 4,3,5 are lon,lat,alt.
    with open(metadata_filename, 'r') as fid:
      metadata = fid.readline().split(',')
    llh_xyz = [float(metadata[4]), float(metadata[3]), float(metadata[5])*\
        Clif.CLIF_DATA[self.CLIF_VERSION]['altitude_conversion']]
    llhs_xyz.append(llh_xyz)
  #Scene origin is the average of all positions.
  origin_xyz = np.mean(np.array(llhs_xyz), 0)
  self.parse_json(date=date, time_of_day=time_of_day, origin_xyz=origin_xyz)
  #Integrate with parse_json OR the itf file. VDL downloads do NOT have this
  #So I'll go with nope.
  # NOTE(review): these four values are looked up but never used below.
  pixel_format = Clif.CLIF_DATA[self.CLIF_VERSION]['pixel_format']
  width = Clif.CLIF_DATA[self.CLIF_VERSION]['width']
  height = Clif.CLIF_DATA[self.CLIF_VERSION]['height']
  bands = Clif.CLIF_DATA[self.CLIF_VERSION]['bands']
  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)
  for image in self.image_collection.images.all():
    filename = image.original_filename
    # Map the image name to its camera-0 metadata file "000000-<tail>.txt".
    metadata_filename_desired = split_clif(filename)
    metadata_filename_desired = '%06d-%s.txt' % (0,
                                                 metadata_filename_desired[2])
    try:
      metadata_index = metadata_basenames.index(metadata_filename_desired)
      metadata_filename = metadata_filenames[metadata_index]
      # NOTE(review): this line is re-read but `metadata` is unused here.
      with open(metadata_filename, 'r') as fid:
        metadata = fid.readline().split(',')
      k = np.eye(3)
      k[0,2] = image.imageWidth/2
      k[1,2] = image.imageHeight/2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = llhs_xyz[metadata_index]
      save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(image.original_filename,
                                                  {}))
    except Exception as e:
      # Images without matching metadata are silently skipped.
      pass
  self.save_scene()
def tiepoint_registration(self, image_collection_id, history=None):
  """Register an image collection's cameras to surveyed control points.

  Collects all non-deleted tie points per control point, keeps control
  points observed in more than one frame, triangulates them with
  ``bwm_triangulate_2d_corrs`` via a BWM site file, computes the
  similarity transform from the triangulated points to the surveyed
  (local-coordinate) points, transforms cameras/bbox/voxel size
  accordingly, and saves a new geolocated Scene plus updated KRT cameras.
  """
  from PIL import Image
  import numpy as np
  from django.contrib.gis import geos
  import vpgl_adaptor
  from vsi.io.krt import Krt
  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt, save_krt
  from voxel_globe.tools.celery import Popen
  from voxel_globe.tools.xml_dict import load_xml
  self.update_state(state='INITIALIZE', meta={'id': image_collection_id})
  image_collection = models.ImageCollection.objects.get(
      id=image_collection_id).history(history)
  control_points = {}
  # Gather tie points across all versions of each image, keyed by the
  # control point ("geoPoint") they observe; `fr` is the frame index.
  for fr, image in enumerate(image_collection.images.all()):
    image = image.history(history)
    tiepoint_ids = set([
        x for imagen in models.Image.objects.filter(objectId=image.objectId)
        for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)
    ])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(
          objectId=tiepoint_id, newerVersion=None).history(history)
      #demoware code hack!
      if 'error' in tiepoint.geoPoint.name.lower():
        continue
      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints': {}}
        control_points[control_point_id]['tiepoints'][fr] = list(
            tiepoint.point)
        # Stored point is (lon, lat, alt); reorder to (lat, lon, alt).
        lla_xyz = models.ControlPoint.objects.get(
            objectId=control_point_id,
            newerVersion=None).history(history).point.coords
        control_points[control_point_id]['3d'] = [
            lla_xyz[x] for x in [1, 0, 2]
        ]
  #filter only control points with more than 1 tiepoint
  control_points = {
      k: v
      for k, v in control_points.iteritems()
      if len(v['tiepoints'].keys()) > 1
  }
  # Origin of the local vertical coordinate system: mean (lat, lon, alt).
  origin_yxz = np.mean([v['3d'] for k, v in control_points.iteritems()],
                       axis=0)
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                  origin_yxz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point][
        'lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(
            lvcs, *control_points[control_point]['3d'])
  images = {}
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    # The BWM site file requires an image path, so write a 1x1 dummy.
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
    img.save(dummy_imagename)  #Thank you stupid site file
    # Dump each image's current KRT camera as frame_XXXXX.txt.
    for fr, image in enumerate(image_collection.images.all()):
      (K, R, T, o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId
      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                'w') as fid:
        print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
            K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2], K[2, 0],
            K[2, 1], K[2, 2])
        print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
            R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2], R[2, 0],
            R[2, 1], R[2, 2])
        print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2, 0])
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    # Write the correspondence (tie point) site file for the triangulator.
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>ve
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(
          control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id][
            'tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0],
                                                       tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')
    #triangulate the points
    Popen([
        'bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
        site_out_name
    ], logger=logger).wait()
    #Read in the result, and load into points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate = {'id': [], 'x': [], 'y': [], 'z': []}
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate['id'].append(int(correspondence.at['id']))
      points_triangulate['x'].append(
          float(correspondence['corr_world_point'].at['X']))
      points_triangulate['y'].append(
          float(correspondence['corr_world_point'].at['Y']))
      points_triangulate['z'].append(
          float(correspondence['corr_world_point'].at['Z']))
    #Read the points out of the control points structure, but make sure they are
    #in the same order (check id == point_id
    points_orig = {'x': [], 'y': [], 'z': []}
    for point_id in points_triangulate['id']:
      point = [
          v['lvcs'] for k, v in control_points.iteritems()
          if v['id'] == point_id
      ]
      points_orig['x'].append(point[0][0])
      points_orig['y'].append(point[0][1])
      points_orig['z'].append(point[0][2])
    new_cameras = os.path.join(processing_dir, 'new_cameras')
    os.mkdir(new_cameras)
    #Make transformation
    transform, scale = vpgl_adaptor.compute_transformation(
        points_triangulate['x'], points_triangulate['y'],
        points_triangulate['z'], points_orig['x'], points_orig['y'],
        points_orig['z'], processing_dir, new_cameras)
    #calculate the new bounding box
    bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(
        list(image_collection.scene.bbox_min),
        list(image_collection.scene.bbox_max), transform)
    #calculate the new voxel size
    default_voxel_size = geos.Point(
        *(x * scale for x in image_collection.scene.default_voxel_size))
    # New scene origin is stored as (lon, lat, alt) — hence the reorder.
    scene = models.Scene.create(
        name=image_collection.scene.name + ' tiepoint registered',
        service_id=self.request.id,
        origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
        bbox_min=geos.Point(*bbox_min),
        bbox_max=geos.Point(*bbox_max),
        default_voxel_size=default_voxel_size,
        geolocated=True)
    scene.save()
    image_collection.scene = scene
    image_collection.save()
    # Load the transformed cameras written by compute_transformation and
    # attach them to the latest version of each image.
    for fr, image_id in images.iteritems():
      krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
      image = models.Image.objects.get(objectId=image_id, newerVersion=None)
      save_krt(self.request.id, image, krt.k, krt.r, krt.t,
               [origin_yxz[x] for x in [1, 0, 2]], srid=4326)
def tiepoint_registration(self, image_collection_id, history=None):
  """Register an image collection's cameras to surveyed control points.

  Collects all non-deleted tie points per control point, keeps control
  points observed in more than one frame, triangulates them with
  ``bwm_triangulate_2d_corrs`` via a BWM site file, computes the
  similarity transform from the triangulated points to the surveyed
  (local-coordinate) points, transforms cameras/bbox/voxel size
  accordingly, and saves a new geolocated Scene plus updated KRT cameras.
  """
  from PIL import Image
  import numpy as np
  from django.contrib.gis import geos
  import vpgl_adaptor
  from vsi.io.krt import Krt
  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt, save_krt
  from voxel_globe.tools.celery import Popen
  from voxel_globe.tools.xml_dict import load_xml
  self.update_state(state='INITIALIZE', meta={'id':image_collection_id})
  image_collection = models.ImageCollection.objects.get(
      id=image_collection_id).history(history)
  control_points = {}
  # Gather tie points across all versions of each image, keyed by the
  # control point ("geoPoint") they observe; `fr` is the frame index.
  for fr,image in enumerate(image_collection.images.all()):
    image = image.history(history)
    tiepoint_ids = set([x
        for imagen in models.Image.objects.filter(objectId=image.objectId)
        for x in imagen.tiepoint_set.all().values_list('objectId',
                                                       flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(objectId=tiepoint_id,
          newerVersion=None).history(history)
      #demoware code hack!
      if 'error' in tiepoint.geoPoint.name.lower():
        continue
      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints':{}}
        control_points[control_point_id]['tiepoints'][fr] = list(
            tiepoint.point)
        # Stored point is (lon, lat, alt); reorder to (lat, lon, alt).
        lla_xyz = models.ControlPoint.objects.get(
            objectId = control_point_id,
            newerVersion=None).history(history).point.coords
        control_points[control_point_id]['3d'] = [lla_xyz[x]
                                                  for x in [1,0,2]]
  #filter only control points with more than 1 tiepoint
  control_points = {k:v for k,v in control_points.iteritems()
                    if len(v['tiepoints'].keys()) > 1}
  # Origin of the local vertical coordinate system: mean (lat, lon, alt).
  origin_yxz = np.mean([v['3d'] for k,v in control_points.iteritems()],
                       axis=0)
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                  origin_yxz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = \
        vpgl_adaptor.convert_to_local_coordinates2(
            lvcs, *control_points[control_point]['3d'])
  images = {}
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    # The BWM site file requires an image path, so write a 1x1 dummy.
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1,1], dtype=np.uint8))
    img.save(dummy_imagename) #Thank you stupid site file
    # Dump each image's current KRT camera as frame_XXXXX.txt.
    for fr,image in enumerate(image_collection.images.all()):
      (K,R,T,o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId
      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                'w') as fid:
        print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2],
            K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
        print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2],
            R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
        print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    # Write the correspondence (tie point) site file for the triangulator.
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>ve
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(
          control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id][
            'tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0],
                                                       tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')
    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
           site_out_name], logger=logger).wait()
    #Read in the result, and load into points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate = {'id':[], 'x':[], 'y':[], 'z':[]}
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate['id'].append(int(correspondence.at['id']))
      points_triangulate['x'].append(
          float(correspondence['corr_world_point'].at['X']))
      points_triangulate['y'].append(
          float(correspondence['corr_world_point'].at['Y']))
      points_triangulate['z'].append(
          float(correspondence['corr_world_point'].at['Z']))
    #Read the points out of the control points structure, but make sure they are
    #in the same order (check id == point_id
    points_orig = {'x':[], 'y':[], 'z':[]}
    for point_id in points_triangulate['id']:
      point = [v['lvcs'] for k,v in control_points.iteritems()
               if v['id'] == point_id]
      points_orig['x'].append(point[0][0])
      points_orig['y'].append(point[0][1])
      points_orig['z'].append(point[0][2])
    new_cameras = os.path.join(processing_dir, 'new_cameras')
    os.mkdir(new_cameras)
    #Make transformation
    transform, scale = vpgl_adaptor.compute_transformation(
        points_triangulate['x'], points_triangulate['y'],
        points_triangulate['z'], points_orig['x'], points_orig['y'],
        points_orig['z'], processing_dir, new_cameras)
    #calculate the new bounding box
    bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(
        list(image_collection.scene.bbox_min),
        list(image_collection.scene.bbox_max), transform)
    #calculate the new voxel size
    default_voxel_size=geos.Point(
        *(x*scale for x in image_collection.scene.default_voxel_size))
    # New scene origin is stored as (lon, lat, alt) — hence the reorder.
    scene = models.Scene.create(
        name=image_collection.scene.name+' tiepoint registered',
        service_id=self.request.id,
        origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
        bbox_min=geos.Point(*bbox_min),
        bbox_max=geos.Point(*bbox_max),
        default_voxel_size=default_voxel_size,
        geolocated=True)
    scene.save()
    image_collection.scene=scene
    image_collection.save()
    # Load the transformed cameras written by compute_transformation and
    # attach them to the latest version of each image.
    for fr, image_id in images.iteritems():
      krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
      image = models.Image.objects.get(objectId=image_id, newerVersion=None)
      save_krt(self.request.id, image, krt.k, krt.r, krt.t,
               [origin_yxz[x] for x in [1,0,2]], srid=4326)
def ingest_data(self, uploadSession_id, imageDir):
  '''Celery task for the ingest route: ingest the CLIF data an upload session
  points to.

  Reads *.raw frames and *.txt metadata from imageDir, converts each frame to
  png + zoomify tiles, registers an Image row per frame, saves a default
  camera (identity rotation, principal point at image center) at the frame's
  GPS position, and creates a Scene whose origin is the mean GPS position.
  The upload session is deleted when done.'''
  import voxel_globe.ingest.models as IngestModels
  from voxel_globe.tools.camera import save_krt
  from PIL import Image
  from datetime import datetime, timedelta
  from .tools import split_clif

  uploadSession = IngestModels.UploadSession.objects.get(id=uploadSession_id);

  metadataFilenames = glob(os.path.join(imageDir, '*.txt'), False);
  metadataFilenames = sorted(metadataFilenames, key=lambda s:s.lower())
  metadataBasenames = map(lambda x:os.path.basename(x).lower(),
                          metadataFilenames)

  #In case none of them succeeded...
  date = 'NYA'
  timeOfDay = 'NYA'

  for metadata_filename in metadataFilenames:
    #Loop through until one succeeds
    try:
      with open(metadata_filename, 'r') as fid:
        data = fid.readline().split(',')
      #Fields 6/7 look like GPS seconds-of-week and week number; the GPS
      #epoch is 1980-01-06
      imu_time = float(data[6])
      imu_week = int(data[7])
      timestamp = datetime(1980, 1, 6) + timedelta(weeks=imu_week,
                                                   seconds=imu_time)
      date = '%04d-%02d-%02d' % (timestamp.year, timestamp.month,
                                 timestamp.day)
      timeOfDay = '%02d:%02d:%02d.%06d' % (timestamp.hour, timestamp.minute,
                                           timestamp.second,
                                           timestamp.microsecond)
      break #Break on first success
    except:
      pass

  imageCollection = voxel_globe.meta.models.ImageCollection.create(
      name="CLIF Upload %s %s %s (%s)" % (uploadSession.name, date, timeOfDay,
                                          uploadSession_id),
      service_id = self.request.id);
  imageCollection.save();

  llhs_xyz = []

  #for d in glob(os.path.join(imageDir, '*'+os.path.sep), False):
  if 1:
    files = glob(os.path.join(imageDir, '*'+os.extsep+'raw'), False);
    files.sort()

    for index,f in enumerate(files):
      self.update_state(state='PROCESSING',
                        meta={'stage':'File %s (%d of %d)' % (f, index+1,
                                                              len(files))})
      logger.debug('Processing %s (%d of %d)', f, index+1, len(files))

      basename = os.path.basename(f)
      img_filename = os.extsep.join([os.path.splitext(f)[0], 'png'])

      #Decode the raw sensor dump; stored (width, height), hence the .T
      with open(f, 'rb') as fid:
        data = fid.read();
      img = np.fromstring(data, dtype=CLIF_DATA[CLIF_VERSION]['dtype']).reshape(
          (CLIF_DATA[CLIF_VERSION]['width'],
           CLIF_DATA[CLIF_VERSION]['height'])).T
      img2 = Image.fromarray(img)
      img2.save(img_filename)

      #Generate zoomify tiles for the image server
      zoomifyName = os.path.splitext(f)[0] + '_zoomify'
      pid = Popen(['vips', 'dzsave', img_filename, zoomifyName,
                   '--layout', 'zoomify'])
      pid.wait();

      #convert the slashes to URL slashes
      relFilePath = urllib.pathname2url(os.path.relpath(img_filename,
          env['VIP_IMAGE_SERVER_ROOT']));
      basename = os.path.split(f)[-1]
      relZoomPath = urllib.pathname2url(os.path.relpath(zoomifyName,
          env['VIP_IMAGE_SERVER_ROOT']));

      pixel_format = CLIF_DATA[CLIF_VERSION]['pixel_format']
      width = CLIF_DATA[CLIF_VERSION]['width']
      height = CLIF_DATA[CLIF_VERSION]['height']
      bands = CLIF_DATA[CLIF_VERSION]['bands']

      img = voxel_globe.meta.models.Image.create(
          name="CLIF Upload %s (%s) Frame %s" % (uploadSession.name,
                                                 uploadSession_id, basename),
          imageWidth=width, imageHeight=height, numberColorBands=bands,
          pixelFormat=pixel_format, fileFormat='zoom',
          imageUrl='%s://%s:%s/%s/%s/' % (env['VIP_IMAGE_SERVER_PROTOCOL'],
              env['VIP_IMAGE_SERVER_HOST'], env['VIP_IMAGE_SERVER_PORT'],
              env['VIP_IMAGE_SERVER_URL_PATH'], relZoomPath),
          originalImageUrl='%s://%s:%s/%s/%s' % (
              env['VIP_IMAGE_SERVER_PROTOCOL'], env['VIP_IMAGE_SERVER_HOST'],
              env['VIP_IMAGE_SERVER_PORT'], env['VIP_IMAGE_SERVER_URL_PATH'],
              relFilePath),
          service_id = self.request.id);
      img.save();
      imageCollection.images.add(img);

      #Find this frame's metadata file by its expected basename
      metadata_filename_desired = split_clif(f)
      metadata_filename_desired = '%06d-%s.txt' % (0,
          metadata_filename_desired[2])

      if 1: # try:
        metadata_index = metadataBasenames.index(metadata_filename_desired)
        metadata_filename = metadataFilenames[metadata_index]

        with open(metadata_filename, 'r') as fid:
          metadata = fid.readline().split(',')
        #presumably fields 4/3/5 are lon/lat/alt — TODO confirm field order
        llh_xyz = [float(metadata[4]), float(metadata[3]),
                   float(metadata[5])*CLIF_DATA[CLIF_VERSION]['altitude_conversion']]
        llhs_xyz.append(llh_xyz)

        #Default camera: identity rotation, principal point at image center
        k = np.eye(3);
        k[0,2] = width/2;
        k[1,2] = height/2;
        r = np.eye(3);
        t = [0, 0, 0];
        origin = llh_xyz;
        save_krt(self.request.id, img, k, r, t, origin, srid=4326);
      # except Exception as e:
      #   pass

  averageGps = np.mean(np.array(llhs_xyz), 0);
  voxel_globe.meta.models.Scene.create(
      name="CLIF origin %s (%s)" % (uploadSession.name, uploadSession_id),
      service_id = self.request.id,
      origin='SRID=%d;POINT(%0.12f %0.12f %0.12f)' % \
      (4326, averageGps[0], averageGps[1], averageGps[2])).save()

  uploadSession.delete()
def run(self):
  '''Ingest CLIF metadata for an already-uploaded image collection.

  Parses the *.txt metadata files for a capture date/time and per-frame GPS
  positions, sets the scene origin to the mean position, and saves a default
  camera (identity rotation, principal point at image center) for every
  image with a matching metadata file.'''
  from vsi.iglob import glob
  from .tools import split_clif

  self.task.update_state(state='Processing', meta={'stage': 'metadata'})

  metadata_filenames = glob(os.path.join(self.ingest_dir, '*.txt'), False)
  metadata_filenames = sorted(metadata_filenames, key=lambda s: s.lower())
  metadata_basenames = map(lambda x: os.path.basename(x).lower(),
                           metadata_filenames)

  date = ''
  time_of_day = ''

  for metadata_filename in metadata_filenames:
    #Loop through until one succeeds
    try:
      with open(metadata_filename, 'r') as fid:
        data = fid.readline().split(',')
      #Fields 6/7 look like GPS seconds-of-week and week number; the GPS
      #epoch is 1980-01-06
      imu_time = float(data[6])
      imu_week = int(data[7])
      timestamp = datetime(1980, 1, 6) + timedelta(weeks=imu_week,
                                                   seconds=imu_time)
      date = '%04d-%02d-%02d' % (timestamp.year, timestamp.month,
                                 timestamp.day)
      time_of_day = '%02d:%02d:%02d.%06d' % (
          timestamp.hour, timestamp.minute, timestamp.second,
          timestamp.microsecond)
      break #Break on first success
    except:
      pass

  #Kinda inefficient, kinda don't care
  llhs_xyz = []
  for metadata_filename in metadata_filenames:
    with open(metadata_filename, 'r') as fid:
      metadata = fid.readline().split(',')
    #presumably fields 4/3/5 are lon/lat/alt — TODO confirm field order
    llh_xyz = [float(metadata[4]), float(metadata[3]), float(metadata[5])*\
        Clif.CLIF_DATA[self.CLIF_VERSION]['altitude_conversion']]
    llhs_xyz.append(llh_xyz)
  origin_xyz = np.mean(np.array(llhs_xyz), 0)

  self.parse_json(date=date, time_of_day=time_of_day, origin_xyz=origin_xyz)

  #Integrate with parse_json OR the itf file. VDL downloads do NOT have this
  #So I'll go with nope.
  #NOTE(review): these four values are currently unused below — the camera
  #uses image.imageWidth/imageHeight instead
  pixel_format = Clif.CLIF_DATA[self.CLIF_VERSION]['pixel_format']
  width = Clif.CLIF_DATA[self.CLIF_VERSION]['width']
  height = Clif.CLIF_DATA[self.CLIF_VERSION]['height']
  bands = Clif.CLIF_DATA[self.CLIF_VERSION]['bands']

  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)

  for image in self.image_collection.images.all():
    filename = image.original_filename
    #Expected metadata basename for this frame
    metadata_filename_desired = split_clif(filename)
    metadata_filename_desired = '%06d-%s.txt' % (
        0, metadata_filename_desired[2])
    try:
      metadata_index = metadata_basenames.index(metadata_filename_desired)
      metadata_filename = metadata_filenames[metadata_index]
      #NOTE(review): this read is unused — the position actually comes from
      #llhs_xyz[metadata_index] computed above
      with open(metadata_filename, 'r') as fid:
        metadata = fid.readline().split(',')
      #Default camera: identity rotation, principal point at image center
      k = np.eye(3)
      k[0, 2] = image.imageWidth / 2
      k[1, 2] = image.imageHeight / 2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = llhs_xyz[metadata_index]
      save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(
                   image.original_filename, {}))
    except Exception as e:
      pass #best effort: images without metadata simply get no camera

  self.save_scene()
def ingest_data(self, uploadSession_id, imageDir):
  '''Celery task for the ingest route: ingest the generic (non-georeferenced)
  imagery an upload session points to.

  Tiles every image for the image server, registers an Image row per file
  (probing TIFFs with tifffile and everything else with PIL), saves a
  placeholder camera at origin [0,0,0], and creates a non-geolocated Scene.
  The upload session is deleted when done.'''
  import voxel_globe.ingest.models as IngestModels
  import numpy
  from voxel_globe.tools.camera import save_krt

  uploadSession = IngestModels.UploadSession.objects.get(id=uploadSession_id);
  #directories = uploadSession.directory.all();
  #imageDirectory = directories.filter(name='image')
  #metaDirectory = directories.filter(name='meta')

  imageCollection = voxel_globe.meta.models.ImageCollection.create(
      name="Generic Upload %s (%s)" % (uploadSession.name, uploadSession_id),
      service_id = self.request.id);
  imageCollection.save();

  #Placeholder extrinsics shared by every image
  r = numpy.eye(3);
  t = [0, 0, 0];

  #NOTE(review): both lists are unused in this function
  gpsList = []
  gpsList2 = []

  for d in glob(os.path.join(imageDir, '*'+os.path.sep), False):
    files = glob(os.path.join(d, '*'), False);
    files.sort()

    for f in files:
      self.update_state(state='PROCESSING',
                        meta={'stage':'File %s of %d' % (f, len(files))})

      #Generate zoomify tiles for the image server
      zoomifyName = f[:-4] + '_zoomify'
      pid = Popen(['vips', 'dzsave', f, zoomifyName, '--layout', 'zoomify'])
      pid.wait();

      #convert the slashes to URL slashes
      relFilePath = urllib.pathname2url(os.path.relpath(f,
          env['VIP_IMAGE_SERVER_ROOT']));
      basename = os.path.split(f)[-1]
      relZoomPath = urllib.pathname2url(os.path.relpath(zoomifyName,
          env['VIP_IMAGE_SERVER_ROOT']));

      with open(f, 'rb') as fid:
        magic = fid.read(4)

      image_info = {}

      #TIFF magic numbers (little-endian II*\0 and big-endian MM\0*)
      if magic == '49492A00'.decode('hex') or \
         magic == '4D4D002A'.decode('hex'):
        logger.debug('Tifffile: %s', f)
        from tifffile import TiffFile
        with TiffFile(f) as image:
          if image.pages[0].dtype == 's':
            image_info['dtype'] = numpy.dtype('S')
          else:
            image_info['dtype'] = numpy.dtype(image.pages[0].dtype)
          image_info['bps'] = image.pages[0].bits_per_sample
          image_info['height'] = image.pages[0].shape[0] #Yep, y,x,z order
          image_info['width'] = image.pages[0].shape[1]
          try:
            image_info['bands'] = image.pages[0].shape[2]
          except IndexError: #a grayscale page has no third dimension
            image_info['bands'] = 1
      else:
        logger.debug('Pil: %s', f)
        from PIL import Image
        with Image.open(f) as image:
          #The getmode* commands do not give you the REAL datatypes. I need
          #the REAL (numpy in this case) bps, not some random PIL designation
          image_info['dtype'] = numpy.dtype(Image._MODE_CONV[image.mode][0])
          #probably doesn't work well for bool... Oh well
          image_info['bps'] = image_info['dtype'].itemsize*8
          image_info['width'] = image.size[0] #Yep, x,y order
          image_info['height'] = image.size[1]
          image_info['bands'] = Image.getmodebands(image.mode)

      img = voxel_globe.meta.models.Image.create(
          name="Generic Upload %s (%s) Frame %s" % (uploadSession.name,
              uploadSession_id, basename),
          imageWidth=image_info['width'], imageHeight=image_info['height'],
          numberColorBands=image_info['bands'],
          pixelFormat=image_info['dtype'].char, fileFormat='zoom',
          imageUrl='%s://%s:%s/%s/%s/' % (env['VIP_IMAGE_SERVER_PROTOCOL'],
              env['VIP_IMAGE_SERVER_HOST'], env['VIP_IMAGE_SERVER_PORT'],
              env['VIP_IMAGE_SERVER_URL_PATH'], relZoomPath),
          originalImageUrl='%s://%s:%s/%s/%s' % (
              env['VIP_IMAGE_SERVER_PROTOCOL'], env['VIP_IMAGE_SERVER_HOST'],
              env['VIP_IMAGE_SERVER_PORT'], env['VIP_IMAGE_SERVER_URL_PATH'],
              relFilePath),
          service_id = self.request.id);
      img.save();
      imageCollection.images.add(img);

      #No metadata for generic uploads — placeholder origin
      origin = [0,0,0];
      logger.debug('Origin is: %s' % origin)

      #Default intrinsics: principal point at image center
      k = numpy.eye(3);
      k[0,2] = image_info['width']/2;
      k[1,2] = image_info['height']/2;

      save_krt(self.request.id, img, k, r, t, origin);

  voxel_globe.meta.models.Scene.create(
      name="Generic origin %s (%s)" % (uploadSession.name, uploadSession_id),
      service_id = self.request.id,
      geolocated=False,
      origin='POINT(%0.12f %0.12f %0.12f)' % \
      (0,0,0)).save()

  uploadSession.delete()
def runVisualSfm(self, imageCollectionId, sceneId, cleanup=True, history=None):
  '''Run the VisualSFM sparse-reconstruction pipeline on an image collection.

  Downloads every image into a temp working dir (converting to a
  VisualSFM-supported format when needed), generates match points, runs
  sparse SFM (seeded with ground control points when the scene is
  geolocated), and writes the resulting cameras back to the database.

  Parameters:
    imageCollectionId - id of the models.ImageCollection to process
    sceneId - id of the models.Scene supplying the origin/geolocated flag
    cleanup - unused in this function body - TODO confirm intent
    history - optional history point forwarded to .history() lookups

  Returns:
    id of the Order row created to log this run
  '''
  from voxel_globe.meta import models
  from voxel_globe.order.visualsfm.models import Order
  from os import environ as env
  from os.path import join as path_join
  import os
  import shutil
  from .tools import writeNvm, writeGcpFile, generateMatchPoints, runSparse,\
                     readNvm
  import voxel_globe.tools
  from voxel_globe.tools.wget import download as wget
  from voxel_globe.tools.camera import get_kto
  import voxel_globe.tools.enu as enu
  import numpy
  import boxm2_adaptor
  import boxm2_scene_adaptor
  from voxel_globe.tools.xml_dict import load_xml
  from django.contrib.gis.geos import Point
  from voxel_globe.tools.image import convert_image
  from distutils.spawn import find_executable
  from glob import glob

  self.update_state(state='INITIALIZE', meta={'stage':0})

  #Make main temp dir and cd into it
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    #Because visualsfm is so... bad, I have to copy it locally so I can
    #configure it
    visualsfm_exe = os.path.join(processing_dir,
        os.path.basename(os.environ['VIP_VISUALSFM_EXE']))
    shutil.copy(find_executable(os.environ['VIP_VISUALSFM_EXE']),
                visualsfm_exe)
    with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
      fid.write('param_search_multiple_models 0\n')
      fid.write('param_use_siftgpu 2\n')

    matchFilename = path_join(processing_dir, 'match.nvm');
    sparce_filename = path_join(processing_dir, 'sparse.nvm');
    #This can NOT be changed in version 0.5.25
    gcpFilename = matchFilename + '.gcp'
    logger.debug('Task %s is processing in %s' % (self.request.id,
                                                  processing_dir))

    image_collection = models.ImageCollection.objects.get(
        id=imageCollectionId).history(history);
    imageList = image_collection.images.all();

    #A Little bit of database logging
    oid = Order(processingDir=processing_dir,
                imageCollection=image_collection)

    localImageList = [];
    for x in range(len(imageList)):
      #Download the image locally
      image = imageList[x].history(history);
      self.update_state(state='INITIALIZE',
                        meta={'stage':'image fetch', 'i':x,
                              'total':len(imageList)})
      imageName = image.originalImageUrl;
      extension = os.path.splitext(imageName)[1].lower()
      localName = path_join(processing_dir,
                            'frame_%05d%s' % (x+1, extension));
      wget(imageName, localName, secret=True)

      #Convert the image if necessary
      if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
        self.update_state(state='INITIALIZE',
                          meta={'stage':'image convert', 'i':x,
                                'total':len(imageList)})
        #Add code here to converty to jpg for visual sfm
        if extension in ['.png']:#'not implemented':
          from PIL import Image
          image_temp = Image.open(localName)
          #NOTE(review): both ppm/pgm names below are immediately overwritten
          #by the .jpg name — the multi-band check is currently dead code
          if len(image_temp.mode) > 1: #Stupid visual sfm is picky :(
            new_local_name = os.path.splitext(localName)[0] + '.ppm';
          else:
            new_local_name = os.path.splitext(localName)[0] + '.pgm';
          new_local_name = os.path.splitext(localName)[0] + '.jpg';
          ###ingest.convert_image(localName, new_local_name, 'PNM')
          convert_image(localName, new_local_name, 'JPEG',
                        options=('QUALITY=100',))
          os.remove(localName)
          localName = new_local_name;
        else:
          raise Exception('Unsupported file type');

      imageInfo = {'localName':localName, 'index':x}
      try:
        #An existing camera (if any) seeds the gcp step below
        [K, T, llh] = get_kto(image, history=history);
        imageInfo['K_intrinsics'] = K;
        imageInfo['transformation'] = T;
        imageInfo['enu_origin'] = llh;
      except:
        pass
      localImageList.append(imageInfo);

    self.update_state(state='PROCESSING',
                      meta={'stage':'generate match points',
                            'processing_dir':processing_dir,
                            'total':len(imageList)})
    generateMatchPoints(map(lambda x:x['localName'], localImageList),
                        matchFilename, logger=logger,
                        executable=visualsfm_exe)

    scene = models.Scene.objects.get(id=sceneId).history(history)
    origin = list(scene.origin)

    if scene.geolocated:
      self.update_state(state='PROCESSING',
                        meta={'stage':'writing gcp points'})

      #find the middle origin, and make it THE origin
      data = []#.name .llh_xyz
      for imageInfo in localImageList:
        try:
          #Camera center in ENU: c = -R't
          r = imageInfo['transformation'][0:3, 0:3]
          t = imageInfo['transformation'][0:3, 3:]
          enu_point = -r.transpose().dot(t);

          #Re-reference the point to the scene origin if it used another one
          if not numpy.array_equal(imageInfo['enu_origin'], origin):
            ecef = enu.enu2xyz(refLong=imageInfo['enu_origin'][0],
                               refLat=imageInfo['enu_origin'][1],
                               refH=imageInfo['enu_origin'][2],
                               e=enu_point[0],
                               n=enu_point[1],
                               u=enu_point[2])
            enu_point = enu.xyz2enu(refLong=origin[0], refLat=origin[1],
                                    refH=origin[2],
                                    X=ecef[0], Y=ecef[1], Z=ecef[2])

          dataBit = {'filename':imageInfo['localName'], 'xyz':enu_point}
          data.append(dataBit);

          #Make this read the cameras from the DB instead
          writeGcpFile(data, gcpFilename)
        except: #some images may have no camera
          pass

      oid.lvcsOrigin = str(origin)
      oid.save()

    self.update_state(state='PROCESSING', meta={'stage':'sparse SFM'})
    runSparse(matchFilename, sparce_filename, gcp=scene.geolocated,
              shared=True, logger=logger, executable=visualsfm_exe)

    self.update_state(state='FINALIZE',
                      meta={'stage':'loading resulting cameras'})

    #prevent bundle2scene from getting confused and crashing
    sift_data = os.path.join(processing_dir, 'sift_data')
    os.mkdir(sift_data)
    for filename in glob(os.path.join(processing_dir, '*.mat')) +\
                    glob(os.path.join(processing_dir, '*.sift')):
      shutil.move(filename, sift_data)

    if scene.geolocated:
      #Create a uscene.xml for the geolocated case. All I want out of this is
      #the bounding box and gsd calculation.
      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=False, out_dir="")

      cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
      #Since the file names are frame_00001, etc... and you KNOW this order
      #is identical to localImageList, with some missing
      for cam in cams:
        frameName = cam.name; #frame_00001, etc....
        imageInfo = filter(lambda x: x['localName'].endswith(frameName),
                           localImageList)[0]
        #I have to use endswith instead of == because visual sfm APPARENTLY
        #decides to take some liberty and make absolute paths relative
        image = imageList[imageInfo['index']].history(history)
        (k,r,t) = cam.krt(width=image.imageWidth,
                          height=image.imageHeight);
        logger.info('Origin is %s' % str(origin))
        llh_xyz = enu.enu2llh(lon_origin=origin[0], lat_origin=origin[1],
                              h_origin=origin[2],
                              east=cam.translation_xyz[0],
                              north=cam.translation_xyz[1],
                              up=cam.translation_xyz[2])

        grcs = models.GeoreferenceCoordinateSystem.create(
            name='%s 0' % image.name,
            xUnit='d', yUnit='d', zUnit='m',
            location='SRID=4326;POINT(%0.15f %0.15f %0.15f)'
                     % (origin[0], origin[1], origin[2]),
            service_id = self.request.id)
        grcs.save()
        cs = models.CartesianCoordinateSystem.create(
            name='%s 1' % (image.name),
            service_id = self.request.id,
            xUnit='m', yUnit='m', zUnit='m');
        cs.save()

        transform = models.CartesianTransform.create(
            name='%s 1_0' % (image.name),
            service_id = self.request.id,
            rodriguezX=Point(*r[0,:]),
            rodriguezY=Point(*r[1,:]),
            rodriguezZ=Point(*r[2,:]),
            translation=Point(t[0][0], t[1][0], t[2][0]),
            coordinateSystem_from_id=grcs.id,
            coordinateSystem_to_id=cs.id)
        transform.save()

        #Update the existing camera row if there is one, else create it
        camera = image.camera;
        try:
          camera.update(service_id = self.request.id,
                        focalLengthU=k[0,0], focalLengthV=k[1,1],
                        principalPointU=k[0,2], principalPointV=k[1,2],
                        coordinateSystem=cs);
        except:
          camera = models.Camera.create(name=image.name,
                                        service_id = self.request.id,
                                        focalLengthU=k[0,0],
                                        focalLengthV=k[1,1],
                                        principalPointU=k[0,2],
                                        principalPointV=k[1,2],
                                        coordinateSystem=cs);
        camera.save();
        image.update(camera = camera);

      logger.info(str(cams[0]))
    else:
      from vsi.tools.natural_sort import natural_sorted
      from glob import glob
      from vsi.io.krt import Krt
      from voxel_globe.tools.camera import save_krt

      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=True, out_dir=processing_dir)
      #While the output dir is used for the b2s folders, uscene.xml is cwd
      #They are both set to processing_dir, so everything works out well

      aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
      #sort them naturally in case there are more then 99,999 files
      aligned_cams = natural_sorted(aligned_cams)

      if len(aligned_cams) != len(imageList):
        #SFM dropped some frames - create a subset collection containing only
        #the frames that got cameras
        new_image_collection = models.ImageCollection.create(
            name="SFM Result Subset (%s)" % image_collection.name,
            service_id = self.request.id);
        new_image_collection.save();
        #The frame number in the camera filename is 1-based (see localName
        #above); convert to a 0-based index into imageList
        frames_keep = set(map(lambda x:
            int(os.path.splitext(x.split('_')[-2])[0])-1, aligned_cams))
        for frame_index in frames_keep:
          new_image_collection.images.add(imageList[frame_index])
        image_collection = new_image_collection
        frames_keep = list(frames_keep)
      else:
        frames_keep = xrange(len(aligned_cams))

      #---Update the camera models in the database.---
      for camera_index, frame_index in enumerate(frames_keep):
        krt = Krt.load(aligned_cams[camera_index])
        image = imageList[frame_index].history(history)
        save_krt(self.request.id, image, krt.k, krt.r, krt.t, [0,0,0],
                 srid=4326)

      #---Update scene information important for the no-metadata case ---
      scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
      boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)
      scene.bbox_min = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[0]
      scene.bbox_max = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[1]
      #This is not a complete or good function really... but it will get me
      #the information I need.
      scene_dict = load_xml(scene_filename)
      block = scene_dict['block']
      scene.default_voxel_size='POINT(%f %f %f)' % \
          (float(block.at['dim_x']), float(block.at['dim_y']),
           float(block.at['dim_z']))
      scene.save()

  return oid.id;
def run(self):
  '''Ingest EXIF-tagged images: pull GPS position and capture date/time from
  each image's EXIF tags, save a default camera (identity rotation,
  principal point at image center) per image, and set the scene origin to
  the mean GPS position.

  Reads self.ingest_dir, self.image_collection, self.json_config, self.date,
  self.srid; sets self.date, self.time_of_day, self.srid and
  self.origin_xyz as side effects.
  '''
  from vsi.io.image import PilReader
  from vsi.iglob import glob
  from vsi.tools import Try
  from .tools import exif_date_time_parse

  self.task.update_state(state='Processing', meta={'stage': 'metadata'})
  self.parse_json()

  gps_valid = []  #only origins with nonzero latitude AND longitude
  gps_all = []    #every parsed origin, as a fallback
  k = np.eye(3)
  r = np.eye(3)
  t = [0, 0, 0]

  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)

  for image in self.image_collection.images.all():
    filename = os.path.join(self.ingest_dir, image.original_filename)
    try:
      img = PilReader(filename, True)

      with Try(): #best effort: images without EXIF are simply skipped
        exifTags = img.object._getexif()
        gps = exifTags[34853] #EXIF GPSInfo IFD

        #Take date/time from the first image offering DateTimeOriginal
        #(36867), DateTime (306), or DateTimeDigitized (36868)
        if self.date == '':
          for date_tag in (36867, 306, 36868):
            try:
              self.date, self.time_of_day = exif_date_time_parse(
                  exifTags[date_tag])
              break
            except:
              pass

        try:
          #GPSLatitude (2) is ((deg_n,deg_d),(min_n,min_d),(sec_n,sec_d))
          latitude = float(gps[2][0][0])/gps[2][0][1] + \
                     float(gps[2][1][0])/gps[2][1][1]/60.0 + \
                     float(gps[2][2][0])/gps[2][2][1]/3600.0
          if gps[1] == 'N': #GPSLatitudeRef
            pass
          elif gps[1] == 'S':
            latitude *= -1
          else: #invalid ref - zero out so it is filtered below
            latitude *= 0
        except:
          latitude = 0

        try:
          longitude = float(gps[4][0][0])/gps[4][0][1] + \
                      float(gps[4][1][0])/gps[4][1][1]/60.0 + \
                      float(gps[4][2][0])/gps[4][2][1]/3600.0
          if gps[3] == 'W': #GPSLongitudeRef
            longitude *= -1
          elif gps[3] == 'E':
            pass
          else: #invalid ref - zero out so it is filtered below
            longitude *= 0
        except:
          longitude = 0

        try:
          #GPSAltitudeRef (5): 0 (or missing) = above sea level, 1 = below
          if 5 not in gps or gps[5] == '\x00':
            altitude = float(gps[6][0]) / gps[6][1]
          else: #negative (below sea level)
            #BUGFIX: the original computed the same positive value here
            #despite the "negative" comment
            altitude = -float(gps[6][0]) / gps[6][1]
        except:
          altitude = 0

        #Untested code, because I don't have images with this tag!
        try:
          if gps[18] == 'WGS-84': #GPSMapDatum
            #http://www.cipa.jp/std/documents/e/DC-008-2010_E.pdf
            self.srid = 4326
          elif gps[18] == 'EGM96': #I'm guessing here?
            self.srid = 7428 #EGM 96
        except:
          pass

        origin = [longitude, latitude, altitude]
        gps_all.append(origin)
        if not any(np.array(origin[0:2]) == 0):
          gps_valid.append(origin)

        #Default camera: identity rotation, principal point at image center
        k[0, 2] = image.imageWidth / 2
        k[1, 2] = image.imageHeight / 2
        save_krt(self.task.request.id, image, k, r, t, origin,
                 srid=self.srid,
                 attributes=matching_attributes.get(
                     image.original_filename, {}))
    except Exception as e:
      pass #an unreadable image should not abort the whole ingest

  logger.error(gps_valid)
  logger.error(gps_all)

  try:
    self.origin_xyz = np.mean(np.array(gps_valid), 0)
    #BUGFIX: the original checked the undefined name "averageGps" here, so
    #the NameError always fell through to the fallback mean. An empty
    #gps_valid yields a scalar nan mean; len() then raises and we fall back
    #to the mean over all parsed origins.
    if len(self.origin_xyz) != 3:
      raise ValueError
  except:
    self.origin_xyz = np.mean(np.array(gps_all), 0)
  logger.error(self.origin_xyz)

  self.save_scene()
def run(self):
  '''Ingest AngelFire imagery: pair each image with its *.pos metadata file,
  save a default camera (identity rotation, principal point at image
  center) per image, and set the scene origin to the mean metadata
  position.'''
  from vsi.iglob import glob

  self.task.update_state(state='Processing', meta={'stage': 'metadata'})

  metadata_filenames = glob(os.path.join(self.ingest_dir, '*.pos'), False)
  metadata_filenames = sorted(metadata_filenames, key=lambda s: s.lower())
  metadata_basenames = map(lambda x: os.path.split(x)[-1].lower(),
                           metadata_filenames)

  #BUGFIX: initialize so parse_json below does not hit a NameError when no
  #.pos files are present (matches the Clif version of this task)
  date = ''
  time_of_day = ''
  for metadata_filename in metadata_filenames:
    # try:
    #The basename starts with a YYYYMMDDhhmmss... timestamp
    #BUGFIX: parse the loop variable, not metadata_filenames[0] (identical
    #on the first pass, but correct if the retry loop is ever restored)
    timestamp = os.path.split(metadata_filename)[1].split('-')[0]
    date = timestamp[0:4] + '-' + timestamp[4:6] + '-' + timestamp[6:8]
    time_of_day = timestamp[8:10] + ':' + timestamp[
        10:12] + ':' + timestamp[12:14]
    break #on first success
    # except:
    #   pass

  llhs_xyz = []
  for metadata_filename in metadata_filenames:
    with open(metadata_filename, 'r') as fid:
      metadata = fid.readline().split(',')
    #presumably fields 5/4/6 are lon/lat/alt — TODO confirm field order
    llh_xyz = [float(metadata[5]), float(metadata[4]), float(metadata[6]) \
        *AngelFire.AF_DATA[self.AF_VERSION]['altitude_conversion']]
    llhs_xyz.append(llh_xyz)
  origin_xyz = np.mean(np.array(llhs_xyz), 0)

  self.parse_json(date=date, time_of_day=time_of_day, origin_xyz=origin_xyz)

  matching_attributes = match_attributes(self.image_collection.images.all(),
                                         self.json_config)

  for image in self.image_collection.images.all():
    filename = image.original_filename
    #Expected metadata name: image basename with its last 6 characters
    #replaced by '00-VIS.pos'
    metadata_filename_desired = (
        os.path.splitext(os.path.split(filename)[-1])[0][0:-6] +
        '00-VIS.pos').lower()
    try:
      metadata_index = metadata_basenames.index(metadata_filename_desired)
      metadata_filename = metadata_filenames[metadata_index]
      #Default camera: identity rotation, principal point at image center
      k = np.eye(3)
      k[0, 2] = image.imageWidth / 2
      k[1, 2] = image.imageHeight / 2
      r = np.eye(3)
      t = [0, 0, 0]
      origin = llhs_xyz[metadata_index]
      save_krt(self.task.request.id, image, k, r, t, origin, srid=self.srid,
               attributes=matching_attributes.get(
                   image.original_filename, {}))
    except Exception as e:
      pass #best effort: images without metadata simply get no camera

  self.save_scene()
def runVisualSfm(self, imageCollectionId, sceneId, cleanup=True,
                 history=None):
  '''Run the VisualSFM sparse-reconstruction pipeline on an image collection.

  Downloads every image into a temp working dir (converting to a
  VisualSFM-supported format when needed), generates match points, runs
  sparse SFM (seeded with ground control points when the scene is
  geolocated), and writes the resulting cameras back to the database.

  Parameters:
    imageCollectionId - id of the models.ImageCollection to process
    sceneId - id of the models.Scene supplying the origin/geolocated flag
    cleanup - unused in this function body - TODO confirm intent
    history - optional history point forwarded to .history() lookups

  Returns:
    id of the Order row created to log this run
  '''
  from voxel_globe.meta import models
  from voxel_globe.order.visualsfm.models import Order
  from os import environ as env
  from os.path import join as path_join
  import os
  import shutil
  from .tools import writeNvm, writeGcpFile, generateMatchPoints, runSparse,\
      readNvm
  import voxel_globe.tools
  from voxel_globe.tools.wget import download as wget
  from voxel_globe.tools.camera import get_kto
  import voxel_globe.tools.enu as enu
  import numpy
  import boxm2_adaptor
  import boxm2_scene_adaptor
  from voxel_globe.tools.xml_dict import load_xml
  from django.contrib.gis.geos import Point
  from voxel_globe.tools.image import convert_image
  from distutils.spawn import find_executable
  from glob import glob

  self.update_state(state='INITIALIZE', meta={'stage': 0})

  #Make main temp dir and cd into it
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    #Because visualsfm is so... bad, I have to copy it locally so I can
    #configure it
    visualsfm_exe = os.path.join(
        processing_dir, os.path.basename(os.environ['VIP_VISUALSFM_EXE']))
    shutil.copy(find_executable(os.environ['VIP_VISUALSFM_EXE']),
                visualsfm_exe)
    with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
      fid.write('param_search_multiple_models 0\n')
      fid.write('param_use_siftgpu 2\n')

    matchFilename = path_join(processing_dir, 'match.nvm')
    sparce_filename = path_join(processing_dir, 'sparse.nvm')
    #This can NOT be changed in version 0.5.25
    gcpFilename = matchFilename + '.gcp'
    logger.debug('Task %s is processing in %s' % (self.request.id,
                                                  processing_dir))

    image_collection = models.ImageCollection.objects.get(
        id=imageCollectionId).history(history)
    imageList = image_collection.images.all()

    #A Little bit of database logging
    oid = Order(processingDir=processing_dir,
                imageCollection=image_collection)

    localImageList = []
    for x in range(len(imageList)):
      #Download the image locally
      image = imageList[x].history(history)
      self.update_state(state='INITIALIZE',
                        meta={
                            'stage': 'image fetch',
                            'i': x,
                            'total': len(imageList)
                        })
      imageName = image.originalImageUrl
      extension = os.path.splitext(imageName)[1].lower()
      localName = path_join(processing_dir,
                            'frame_%05d%s' % (x + 1, extension))
      wget(imageName, localName, secret=True)

      #Convert the image if necessary
      if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
        self.update_state(state='INITIALIZE',
                          meta={
                              'stage': 'image convert',
                              'i': x,
                              'total': len(imageList)
                          })
        #Add code here to converty to jpg for visual sfm
        if extension in ['.png']: #'not implemented':
          from PIL import Image
          image_temp = Image.open(localName)
          #NOTE(review): both ppm/pgm names below are immediately overwritten
          #by the .jpg name — the multi-band check is currently dead code
          if len(image_temp.mode
                 ) > 1: #Stupid visual sfm is picky :(
            new_local_name = os.path.splitext(
                localName)[0] + '.ppm'
          else:
            new_local_name = os.path.splitext(
                localName)[0] + '.pgm'
          new_local_name = os.path.splitext(localName)[0] + '.jpg'
          ###ingest.convert_image(localName, new_local_name, 'PNM')
          convert_image(localName,
                        new_local_name,
                        'JPEG',
                        options=('QUALITY=100', ))
          os.remove(localName)
          localName = new_local_name
        else:
          raise Exception('Unsupported file type')

      imageInfo = {'localName': localName, 'index': x}
      try:
        #An existing camera (if any) seeds the gcp step below
        [K, T, llh] = get_kto(image, history=history)
        imageInfo['K_intrinsics'] = K
        imageInfo['transformation'] = T
        imageInfo['enu_origin'] = llh
      except:
        pass
      localImageList.append(imageInfo)

    self.update_state(state='PROCESSING',
                      meta={
                          'stage': 'generate match points',
                          'processing_dir': processing_dir,
                          'total': len(imageList)
                      })
    generateMatchPoints(map(lambda x: x['localName'], localImageList),
                        matchFilename,
                        logger=logger,
                        executable=visualsfm_exe)

    scene = models.Scene.objects.get(id=sceneId).history(history)
    origin = list(scene.origin)

    if scene.geolocated:
      self.update_state(state='PROCESSING',
                        meta={'stage': 'writing gcp points'})

      #find the middle origin, and make it THE origin
      data = [] #.name .llh_xyz
      for imageInfo in localImageList:
        try:
          #Camera center in ENU: c = -R't
          r = imageInfo['transformation'][0:3, 0:3]
          t = imageInfo['transformation'][0:3, 3:]
          enu_point = -r.transpose().dot(t)

          #Re-reference the point to the scene origin if it used another one
          if not numpy.array_equal(imageInfo['enu_origin'], origin):
            ecef = enu.enu2xyz(
                refLong=imageInfo['enu_origin'][0],
                refLat=imageInfo['enu_origin'][1],
                refH=imageInfo['enu_origin'][2],
                e=enu_point[0],
                n=enu_point[1],
                u=enu_point[2])
            enu_point = enu.xyz2enu(refLong=origin[0],
                                    refLat=origin[1],
                                    refH=origin[2],
                                    X=ecef[0],
                                    Y=ecef[1],
                                    Z=ecef[2])

          dataBit = {
              'filename': imageInfo['localName'],
              'xyz': enu_point
          }
          data.append(dataBit)

          #Make this read the cameras from the DB instead
          writeGcpFile(data, gcpFilename)
        except: #some images may have no camera
          pass

      oid.lvcsOrigin = str(origin)
      oid.save()

    self.update_state(state='PROCESSING', meta={'stage': 'sparse SFM'})
    runSparse(matchFilename,
              sparce_filename,
              gcp=scene.geolocated,
              shared=True,
              logger=logger,
              executable=visualsfm_exe)

    self.update_state(state='FINALIZE',
                      meta={'stage': 'loading resulting cameras'})

    #prevent bundle2scene from getting confused and crashing
    sift_data = os.path.join(processing_dir, 'sift_data')
    os.mkdir(sift_data)
    for filename in glob(os.path.join(processing_dir, '*.mat')) +\
        glob(os.path.join(processing_dir, '*.sift')):
      shutil.move(filename, sift_data)

    if scene.geolocated:
      #Create a uscene.xml for the geolocated case. All I want out of this is
      #the bounding box and gsd calculation.
      boxm2_adaptor.bundle2scene(sparce_filename,
                                 processing_dir,
                                 isalign=False,
                                 out_dir="")

      cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
      #Since the file names are frame_00001, etc... and you KNOW this order
      #is identical to localImageList, with some missing
      for cam in cams:
        frameName = cam.name #frame_00001, etc....
        imageInfo = filter(
            lambda x: x['localName'].endswith(frameName),
            localImageList)[0]
        #I have to use endswith instead of == because visual sfm APPARENTLY
        #decides to take some liberty and make absolute paths relative
        image = imageList[imageInfo['index']].history(history)
        (k, r, t) = cam.krt(width=image.imageWidth,
                            height=image.imageHeight)
        logger.info('Origin is %s' % str(origin))
        llh_xyz = enu.enu2llh(lon_origin=origin[0],
                              lat_origin=origin[1],
                              h_origin=origin[2],
                              east=cam.translation_xyz[0],
                              north=cam.translation_xyz[1],
                              up=cam.translation_xyz[2])

        grcs = models.GeoreferenceCoordinateSystem.create(
            name='%s 0' % image.name,
            xUnit='d',
            yUnit='d',
            zUnit='m',
            location='SRID=4326;POINT(%0.15f %0.15f %0.15f)' %
            (origin[0], origin[1], origin[2]),
            service_id=self.request.id)
        grcs.save()
        cs = models.CartesianCoordinateSystem.create(
            name='%s 1' % (image.name),
            service_id=self.request.id,
            xUnit='m',
            yUnit='m',
            zUnit='m')
        cs.save()

        transform = models.CartesianTransform.create(
            name='%s 1_0' % (image.name),
            service_id=self.request.id,
            rodriguezX=Point(*r[0, :]),
            rodriguezY=Point(*r[1, :]),
            rodriguezZ=Point(*r[2, :]),
            translation=Point(t[0][0], t[1][0], t[2][0]),
            coordinateSystem_from_id=grcs.id,
            coordinateSystem_to_id=cs.id)
        transform.save()

        #Update the existing camera row if there is one, else create it
        camera = image.camera
        try:
          camera.update(service_id=self.request.id,
                        focalLengthU=k[0, 0],
                        focalLengthV=k[1, 1],
                        principalPointU=k[0, 2],
                        principalPointV=k[1, 2],
                        coordinateSystem=cs)
        except:
          camera = models.Camera.create(name=image.name,
                                        service_id=self.request.id,
                                        focalLengthU=k[0, 0],
                                        focalLengthV=k[1, 1],
                                        principalPointU=k[0, 2],
                                        principalPointV=k[1, 2],
                                        coordinateSystem=cs)
        camera.save()
        image.update(camera=camera)

      logger.info(str(cams[0]))
    else:
      from vsi.tools.natural_sort import natural_sorted
      from glob import glob
      from vsi.io.krt import Krt
      from voxel_globe.tools.camera import save_krt

      boxm2_adaptor.bundle2scene(sparce_filename,
                                 processing_dir,
                                 isalign=True,
                                 out_dir=processing_dir)
      #While the output dir is used for the b2s folders, uscene.xml is cwd
      #They are both set to processing_dir, so everything works out well

      aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
      #sort them naturally in case there are more then 99,999 files
      aligned_cams = natural_sorted(aligned_cams)

      if len(aligned_cams) != len(imageList):
        #SFM dropped some frames - create a subset collection containing
        #only the frames that got cameras
        new_image_collection = models.ImageCollection.create(
            name="SFM Result Subset (%s)" % image_collection.name,
            service_id=self.request.id)
        new_image_collection.save()
        #The frame number in the camera filename is 1-based (see localName
        #above); convert to a 0-based index into imageList
        frames_keep = set(
            map(
                lambda x: int(os.path.splitext(x.split('_')[-2])[0]) - 1,
                aligned_cams))
        for frame_index in frames_keep:
          new_image_collection.images.add(imageList[frame_index])
        image_collection = new_image_collection
        frames_keep = list(frames_keep)
      else:
        frames_keep = xrange(len(aligned_cams))

      #---Update the camera models in the database.---
      for camera_index, frame_index in enumerate(frames_keep):
        krt = Krt.load(aligned_cams[camera_index])
        image = imageList[frame_index].history(history)
        save_krt(self.request.id, image, krt.k, krt.r, krt.t, [0, 0, 0],
                 srid=4326)

      #---Update scene information important for the no-metadata case ---
      scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
      boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)
      scene.bbox_min = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[0]
      scene.bbox_max = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[1]
      #This is not a complete or good function really... but it will get me
      #the information I need.
      scene_dict = load_xml(scene_filename)
      block = scene_dict['block']
      scene.default_voxel_size='POINT(%f %f %f)' % \
          (float(block.at['dim_x']), float(block.at['dim_y']),
           float(block.at['dim_z']))
      scene.save()

  return oid.id
def runVisualSfm(self, imageSetId, sceneId, cleanup=True):
  ''' Celery task body: run the VisualSFM structure-from-motion pipeline on
      an image set and store the recovered cameras in the database.

      Stages (reported through ``self.update_state``):
        INITIALIZE - copy the VisualSFM executable, fetch/convert images
        PROCESSING - feature matching, optional GCP file, sparse SFM
        FINALIZE   - load resulting cameras, update scene info

      Parameters
      ----------
      imageSetId : primary key of the ``models.ImageSet`` to reconstruct.
      sceneId : primary key of the ``models.Scene``; its ``geolocated``
          flag selects between the GCP (georegistered) workflow and the
          align (no-metadata) workflow.
      cleanup : not referenced in the code visible here -- presumably
          reserved for temp-dir cleanup. TODO(review): confirm.

      NOTE(review): Python 2 era code (xrange, list-returning map/filter,
      bare ``except:`` clauses).
  '''
  from voxel_globe.meta import models
  from os import environ as env
  from os.path import join as path_join
  import os
  import shutil
  import time

  from django.contrib.gis.geos import Point

  from .tools import writeNvm, writeGcpFile, generateMatchPoints, runSparse,\
                     readNvm

  import voxel_globe.tools
  from voxel_globe.tools.camera import get_kto, save_krt
  import voxel_globe.tools.enu as enu

  import numpy

  import boxm2_adaptor
  import boxm2_scene_adaptor

  from voxel_globe.tools.xml_dict import load_xml

  from django.contrib.gis.geos import Point #NOTE(review): duplicate import

  from voxel_globe.tools.image import convert_image

  from distutils.spawn import find_executable

  from vsi.iglob import glob as glob

  self.update_state(state='INITIALIZE', meta={'stage':0})

  #Make main temp dir and cd into it
  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    #Because visualsfm is so... bad, I need to copy it locally so I can
    #configure it
    visualsfm_exe = os.path.join(processing_dir, 'visualsfm')
    shutil.copy(find_executable('VisualSFM'), visualsfm_exe)
    #Per-working-dir VisualSFM configuration knobs
    with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
      fid.write('param_search_multiple_models 0\n')
      fid.write('param_use_siftgpu 2\n')

    matchFilename = path_join(processing_dir, 'match.nvm')
    sparce_filename = path_join(processing_dir, 'sparse.nvm')
    #This can NOT be changed in version 0.5.25
    gcpFilename = matchFilename + '.gcp'
    logger.debug('Task %s is processing in %s' % (self.request.id,
                                                 processing_dir))

    image_set = models.ImageSet.objects.get(id=imageSetId)
    imageList = image_set.images.all()

    ### if 1:
    ### try: #Not fully integrated yet
    ###   sift_gpu = siftgpu.SiftGPU()
    ### except:
    ###   pass

    #--- Fetch every image into the processing dir as frame_%05d.* ---
    localImageList = []
    for x in range(len(imageList)):
      #Download the image locally
      image = imageList[x]
      self.update_state(state='INITIALIZE', meta={'stage':'image fetch',
                                                  'i':x,
                                                  'total':len(imageList)})
      imageName = image.filename_path
      extension = os.path.splitext(imageName)[1].lower()
      localName = path_join(processing_dir,
                            'frame_%05d%s' % (x+1, extension))
      #lncp(imageName, localName)
      #Stupid VisualSFM dereferences symlinks, breaking this
      shutil.copyfile(imageName, localName)

      #Convert the image if necessary (VisualSFM only accepts these types)
      if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
        self.update_state(state='INITIALIZE', meta={'stage':'image convert',
                                                    'i':x,
                                                    'total':len(imageList)})
        #Add code here to converty to jpg for visual sfm
        if extension in ['.png']:#'not implemented':
          from PIL import Image
          image_temp = Image.open(localName)
          # if len(image_temp.mode) > 1: #Stupid visual sfm is picky :(
          #   new_local_name = os.path.splitext(localName)[0] + '.ppm'
          # else:
          #   new_local_name = os.path.splitext(localName)[0] + '.pgm'
          new_local_name = os.path.splitext(localName)[0] + '.jpg'
          ###ingest.convert_image(localName, new_local_name, 'PNM')
          convert_image(localName, new_local_name, 'JPEG',
                        options=('QUALITY=100',))
          os.remove(localName)
          localName = new_local_name
        else:
          raise Exception('Unsupported file type')

      imageInfo = {'localName':localName, 'index':x}

      #Attach camera metadata when the image has any; images without a
      #camera simply get no K/T/origin keys
      #NOTE(review): the bare except also hides unexpected errors
      try:
        [K, T, llh] = get_kto(image)
        imageInfo['K_intrinsics'] = K
        imageInfo['transformation'] = T
        imageInfo['enu_origin'] = llh
      except:
        pass

      localImageList.append(imageInfo)

      ### if 1:
      ### try: #not fully integrated yet
      ###   sift_gpu.create_sift(localName,
      ###                        os.path.splitext(localName)[0]+'.sift')
      ### except:
      ###   pass

    # filenames = list(imageList.values_list('image_url'))
    # logger.info('The image list 0is %s' % filenames)

    #--- Feature detection + pairwise matching (asynchronous subprocess) ---
    self.update_state(state='PROCESSING',
                      meta={'stage':'generate match points',
                            'processing_dir':processing_dir,
                            'total':len(imageList)})
    pid = generateMatchPoints(map(lambda x:x['localName'], localImageList),
                              matchFilename, logger=logger,
                              executable=visualsfm_exe)

    #Poll the directory to report progress: .sift/.mat files appear as
    #VisualSFM works through the images
    old_mat = None
    old_sift = None
    #TODO: Replace with inotify to monitor directory
    while pid.poll() is None:
      mat = len(glob(os.path.join(processing_dir, '*.mat'), False))
      sift = len(glob(os.path.join(processing_dir, '*.sift'), False))
      if mat != old_mat or \
         sift != old_sift:
        old_mat = mat
        old_sift = sift
        self.update_state(state='PROCESSING',
                          meta={'stage':'generate match points',
                                'processing_dir':processing_dir,
                                'sift':sift, 'mat':mat,
                                'total':len(imageList)})
      time.sleep(5)

    # cameras = []
    # for image in imageList:
    #   if 1:
    #   #try:
    #     [K, T, llh] = get_kto(image)
    #     cameras.append({'image':image.id, 'K':K, 'tranformation': T,
    #                     'origin':llh})
    #   #except:
    #     pass
    # origin = numpy.median(origin, axis=0)

    # origin = [-92.215197, 37.648858, 268.599]

    scene = models.Scene.objects.get(id=sceneId)
    origin = list(scene.origin)

    if scene.geolocated:
      self.update_state(state='PROCESSING',
                        meta={'stage':'writing gcp points'})

      #find the middle origin, and make it THE origin
      data = []#.name .llh_xyz
      for imageInfo in localImageList:
        try:
          #Camera center in the image's own ENU frame: C = -R't
          r = imageInfo['transformation'][0:3, 0:3]
          t = imageInfo['transformation'][0:3, 3:]
          enu_point = -r.transpose().dot(t)

          #Re-express the camera center in the scene origin's ENU frame
          #(via ECEF) when this image used a different ENU origin
          if not numpy.array_equal(imageInfo['enu_origin'], origin):
            ecef = enu.enu2xyz(refLong=imageInfo['enu_origin'][0],
                               refLat=imageInfo['enu_origin'][1],
                               refH=imageInfo['enu_origin'][2],
                               #e=imageInfo['transformation'][0, 3],
                               #n=imageInfo['transformation'][1, 3],
                               #u=imageInfo['transformation'][2, 3])
                               e=enu_point[0],
                               n=enu_point[1],
                               u=enu_point[2])
            enu_point = enu.xyz2enu(refLong=origin[0],
                                    refLat=origin[1],
                                    refH=origin[2],
                                    X=ecef[0], Y=ecef[1], Z=ecef[2])
          # else:
          #   enu_point = imageInfo['transformation'][0:3, 3]

          dataBit = {'filename':imageInfo['localName'], 'xyz':enu_point}
          data.append(dataBit)

          #Make this a separate ingest process, making CAMERAS linked to the
          #images
          #data = arducopter.loadAdjTaggedMetadata(
          #    r'd:\visualsfm\2014-03-20 13-22-44_adj_tagged_images.txt')
          #Make this read the cameras from the DB instead
          #NOTE(review): rewrites the gcp file on every iteration with the
          #data accumulated so far; only the last successful write matters
          writeGcpFile(data, gcpFilename)
        except: #some images may have no camera
          pass

    #--- Sparse reconstruction (GCP-constrained when geolocated) ---
    self.update_state(state='PROCESSING', meta={'stage':'sparse SFM'})
    pid = runSparse(matchFilename, sparce_filename, gcp=scene.geolocated,
                    shared=True, logger=logger, executable=visualsfm_exe)
    pid.wait()

    self.update_state(state='FINALIZE',
                      meta={'stage':'loading resulting cameras'})

    #prevent bundle2scene from getting confused and crashing
    sift_data = os.path.join(processing_dir, 'sift_data')
    os.mkdir(sift_data)
    for filename in glob(os.path.join(processing_dir, '*.mat'), False) +\
                    glob(os.path.join(processing_dir, '*.sift'), False):
      shutil.move(filename, sift_data)

    if scene.geolocated:
      #Create a uscene.xml for the geolocated case. All I want out of this is
      #the bounding box and gsd calculation.
      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=False, out_dir="")

      cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
      #cams.sort(key=lambda x:x.name)
      #Since the file names are frame_00001, etc... and you KNOW this order
      #is identical to localImageList, with some missing

      camera_set = models.CameraSet(name="Visual SFM Geo %s" % image_set.name,
                                    service_id = self.request.id,
                                    images_id = imageSetId)
      camera_set.save()

      for cam in cams:
        frameName = cam.name #frame_00001, etc....
        imageInfo = filter(lambda x: x['localName'].endswith(frameName),
                           localImageList)[0]
        #I have to use endswith instead of == because visual sfm APPARENTLY
        #decides to take some liberty and make absolute paths relative
        image = imageList[imageInfo['index']]
        (k,r,t) = cam.krt(width=image.image_width, height=image.image_height)
        t = t.flatten()

        camera = save_krt(self.request.id, image, k, r, t, origin, srid=4326)
        camera_set.cameras.add(camera)
    else:
      from vsi.tools.natural_sort import natural_sorted
      from vsi.io.krt import Krt

      boxm2_adaptor.bundle2scene(sparce_filename, processing_dir,
                                 isalign=True, out_dir=processing_dir)
      #While the output dir is used for the b2s folders, uscene.xml is cwd
      #They are both set to processing_dir, so everything works out well

      aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
      #sort them naturally in case there are more then 99,999 files
      aligned_cams = natural_sorted(aligned_cams)

      if len(aligned_cams) != len(imageList):
        #Create a new image set holding only the images whose cameras
        #survived the reconstruction
        new_image_set = models.ImageSet(
            name="SFM Result Subset (%s)" % image_set.name,
            service_id = self.request.id)
        # for image in image_set.images.all():
        #   new_image_set.images.add(image)
        new_image_set.save()

        #Recover each surviving 0-based frame index from the cams_krt
        #filenames (embedded frame numbers are 1-based)
        frames_keep = set(map(
            lambda x: int(os.path.splitext(x.split('_')[-2])[0])-1,
            aligned_cams))

        for frame_index in frames_keep:
          new_image_set.images.add(imageList[frame_index])

        # frames_remove = set(xrange(len(imageList))) - frames_keep
        #
        # for remove_index in list(frames_remove):
        #   #The frame number refers to the nth image in the image set,
        #   #so frame_00100.tif is the 100th image, starting the index at
        #   #one. See local_name above
        #
        #   #remove the images sfm threw away
        #   new_image_set.remove(imageList[remove_index])

        image_set = new_image_set

        frames_keep = list(frames_keep)
      else:
        frames_keep = xrange(len(aligned_cams))

      camera_set = models.CameraSet(name="Visual SFM %s" % image_set.name,
                                    service_id = self.request.id,
                                    images_id = imageSetId)
      camera_set.save()

      #---Update the camera models in the database.---
      for camera_index, frame_index in enumerate(frames_keep):
        krt = Krt.load(aligned_cams[camera_index])
        image = imageList[frame_index]
        camera = save_krt(self.request.id, image, krt.k, krt.r, krt.t,
                          [0,0,0], srid=4326)
        camera_set.cameras.add(camera)

      #---Update scene information important for the no-metadata case ---
      scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
      boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)

      scene.bbox_min = Point(*boxm_scene.bbox[0])
      scene.bbox_max = Point(*boxm_scene.bbox[1])

      #This is not a complete or good function really... but it will get me
      #the information I need.
      scene_dict = load_xml(scene_filename)
      block = scene_dict['block']
      scene.default_voxel_size=Point(float(block.at['dim_x']),
                                     float(block.at['dim_y']),
                                     float(block.at['dim_z']))

      scene.save()