Code example #1
File: tasks.py Project: andyneff/voxel_globe
def height_map_error(self, image_id, history=None):
  
  import numpy as np

  import vpgl_adaptor
  
  from vsi.io.image import GdalReader
  
  from voxel_globe.meta import models
  import voxel_globe.tools

  from voxel_globe.tools.wget import download as wget

  tie_points_yxz = []
  control_points_yxz = []

  image = models.Image.objects.get(id=image_id).history(history)

  #Fetch the height map locally, then read its affine geotransform and raster
  with voxel_globe.tools.task_dir('height_map_error_calculation', cd=True) as processing_dir:
    wget(image.originalImageUrl, image.original_filename, secret=True)
    height_reader = GdalReader(image.original_filename, autoload=True)
    transform = height_reader.object.GetGeoTransform()
    height = height_reader.raster()

  #Gather tie point ids across every version of this image
  tie_point_ids = set([x for imagen in models.Image.objects.filter(
      objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list(
      'objectId', flat=True)])

  for tie_point_id in tie_point_ids:
    tie_point = models.TiePoint.objects.get(objectId=tie_point_id, newerVersion=None).history(history)

    if not tie_point.deleted:
      #Control point coordinates arrive as (lon, lat, alt); reorder to (y, x, z)
      lla_xyz = models.ControlPoint.objects.get(objectId = tie_point.geoPoint.objectId, newerVersion=None).history(history).point.coords
      control_points_yxz.append([lla_xyz[x] for x in [1,0,2]])
      #Map the tie point's pixel coordinates through the GDAL affine
      #geotransform (+0.5 selects the pixel center), then sample the height
      tie_points_yxz.append([transform[4]*(tie_point.point.coords[0]+0.5) + transform[5]*(tie_point.point.coords[1]+0.5) + transform[3],
                             transform[1]*(tie_point.point.coords[0]+0.5) + transform[2]*(tie_point.point.coords[1]+0.5) + transform[0],
                             height[tie_point.point.coords[1], tie_point.point.coords[0]]])

  #Anchor a local vertical coordinate system (LVCS) at the mean control point
  origin_yxz = np.mean(np.array(control_points_yxz), axis=0)
  tie_points_local = []
  control_points_local = []
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1], origin_yxz[2], 'wgs84')

  for tie_point in tie_points_yxz:
    tie_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *tie_point))

  for control_point in control_points_yxz:
    control_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_point))

  #Per-axis RMS of the residuals: column-wise norm over the (N, 3) difference
  #array, divided by sqrt(N)
  error = np.linalg.norm(np.array(tie_points_local)-np.array(control_points_local), axis=0)/(len(tie_points_local)**0.5)

  result = {}
  result['error'] = list(error)
  #95% accuracy estimates (NSSDA-style, assuming Gaussian error): 2.4477 is
  #the horizontal CE95 factor, 1.96 the vertical LE95 factor
  result['horizontal_accuracy'] = 2.4477*0.5*(error[0]+error[1])
  result['vertical_accuracy'] = 1.96*error[2]

  return result
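
The error statistics at the end are per-axis RMS values: np.linalg.norm(..., axis=0) collapses the (N, 3) residual array column by column, and dividing by sqrt(N) turns each column norm into an RMS. The 2.4477 and 1.96 multipliers match the NSSDA 95% horizontal (CE95) and vertical (LE95) accuracy formulas for Gaussian errors. A minimal, self-contained sketch of the same computation, with synthetic residuals standing in for the tie/control point differences:

import numpy as np

#Synthetic local-coordinate residuals: N points x (y, x, z) axes
residuals = np.array([[ 0.8, -0.3,  1.1],
                      [-0.5,  0.6, -0.9],
                      [ 0.2, -0.4,  0.7]])

#Per-axis RMS: column-wise norm divided by sqrt(N)
rms = np.linalg.norm(residuals, axis=0) / np.sqrt(len(residuals))

#95% accuracy estimates, assuming zero-mean Gaussian error
horizontal_accuracy = 2.4477 * 0.5 * (rms[0] + rms[1])  #CE95
vertical_accuracy = 1.96 * rms[2]                        #LE95
print(rms, horizontal_accuracy, vertical_accuracy)
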
Code example #2
File: tasks.py Project: andyneff/voxel_globe
def runVisualSfm(self, imageCollectionId, sceneId, cleanup=True, history=None):
    from voxel_globe.meta import models
    from voxel_globe.order.visualsfm.models import Order

    from os.path import join as path_join
    import os
    import shutil

    from .tools import writeGcpFile, generateMatchPoints, runSparse, readNvm

    import voxel_globe.tools
    from voxel_globe.tools.wget import download as wget
    from voxel_globe.tools.camera import get_kto
    import voxel_globe.tools.enu as enu
    import numpy

    import boxm2_adaptor
    import boxm2_scene_adaptor
    from voxel_globe.tools.xml_dict import load_xml

    from django.contrib.gis.geos import Point
    from voxel_globe.tools.image import convert_image

    from distutils.spawn import find_executable

    from glob import glob

    #logger is assumed to be a module-level task logger defined in the
    #original tasks.py
    self.update_state(state='INITIALIZE', meta={'stage': 0})

    #Make main temp dir and cd into it
    with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:

        #Because visualsfm is so... bad, I have to copy it locally so I can
        #configure it
        visualsfm_exe = os.path.join(
            processing_dir, os.path.basename(os.environ['VIP_VISUALSFM_EXE']))
        shutil.copy(find_executable(os.environ['VIP_VISUALSFM_EXE']),
                    visualsfm_exe)
        with open(os.path.join(processing_dir, 'nv.ini'), 'w') as fid:
            fid.write('param_search_multiple_models 0\n')
            fid.write('param_use_siftgpu 2\n')

        matchFilename = path_join(processing_dir, 'match.nvm')
        sparce_filename = path_join(processing_dir, 'sparse.nvm')
        #This can NOT be changed in version 0.5.25
        gcpFilename = matchFilename + '.gcp'
        logger.debug('Task %s is processing in %s' %
                     (self.request.id, processing_dir))

        image_collection = models.ImageCollection.objects.get(
            id=imageCollectionId).history(history)
        imageList = image_collection.images.all()

        #A Little bit of database logging
        oid = Order(processingDir=processing_dir,
                    imageCollection=image_collection)

        ###    if 1:
        ###    try: #Not fully integrated yet
        ###      sift_gpu = siftgpu.SiftGPU()
        ###    except:
        ###      pass

        localImageList = []
        for x in range(len(imageList)):
            #Download the image locally
            image = imageList[x].history(history)
            self.update_state(state='INITIALIZE',
                              meta={
                                  'stage': 'image fetch',
                                  'i': x,
                                  'total': len(imageList)
                              })
            imageName = image.originalImageUrl
            extension = os.path.splitext(imageName)[1].lower()
            localName = path_join(processing_dir,
                                  'frame_%05d%s' % (x + 1, extension))
            wget(imageName, localName, secret=True)

            #Convert the image if necessary
            if extension not in ['.jpg', '.jpeg', '.pgm', '.ppm']:
                self.update_state(state='INITIALIZE',
                                  meta={
                                      'stage': 'image convert',
                                      'i': x,
                                      'total': len(imageList)
                                  })
                #Convert to jpg for visual sfm
                if extension in ['.png']:
                    #The original picked a .ppm/.pgm name from the PIL image
                    #mode here, but that choice was dead code: the .jpg
                    #assignment below always overwrote it, so only the JPEG
                    #conversion remains
                    new_local_name = os.path.splitext(localName)[0] + '.jpg'

                    convert_image(localName,
                                  new_local_name,
                                  'JPEG',
                                  options=('QUALITY=100', ))
                    os.remove(localName)

                    localName = new_local_name

                else:
                    raise Exception('Unsupported file type')

            imageInfo = {'localName': localName, 'index': x}

            try:
                [K, T, llh] = get_kto(image, history=history)
                imageInfo['K_intrinsics'] = K
                imageInfo['transformation'] = T
                imageInfo['enu_origin'] = llh
            except Exception:  #some images have no camera metadata yet
                pass

            localImageList.append(imageInfo)
###      if 1:
###      try: #not fully integrated yet
###        sift_gpu.create_sift(localName, os.path.splitext(localName)[0]+'.sift')
###      except:
###        pass

#  filenames = list(imageList.values_list('imageUrl'))
#  logger.info('The image list 0is %s' % filenames)

        self.update_state(state='PROCESSING',
                          meta={
                              'stage': 'generate match points',
                              'processing_dir': processing_dir,
                              'total': len(imageList)
                          })
        generateMatchPoints(map(lambda x: x['localName'], localImageList),
                            matchFilename,
                            logger=logger,
                            executable=visualsfm_exe)

        #   cameras = [];
        #   for image in imageList:
        #     if 1:
        #     #try:
        #       [K, T, llh] = get_kto(image);
        #       cameras.append({'image':image.id, 'K':K, 'tranformation':
        #                       T, 'origin':llh})
        #     #except:
        #       pass

        #  origin = numpy.median(origin, axis=0)
        #  origin = [-92.215197, 37.648858, 268.599]
        scene = models.Scene.objects.get(id=sceneId).history(history)
        origin = list(scene.origin)

        if scene.geolocated:
            self.update_state(state='PROCESSING',
                              meta={'stage': 'writing gcp points'})

            #find the middle origin, and make it THE origin
            data = []  #.name .llh_xyz
            for imageInfo in localImageList:
                try:
                    r = imageInfo['transformation'][0:3, 0:3]
                    t = imageInfo['transformation'][0:3, 3:]
                    enu_point = -r.transpose().dot(t)

                    if not numpy.array_equal(imageInfo['enu_origin'], origin):
                        ecef = enu.enu2xyz(
                            refLong=imageInfo['enu_origin'][0],
                            refLat=imageInfo['enu_origin'][1],
                            refH=imageInfo['enu_origin'][2],
                            #e=imageInfo['transformation'][0, 3],
                            #n=imageInfo['transformation'][1, 3],
                            #u=imageInfo['transformation'][2, 3])
                            e=enu_point[0],
                            n=enu_point[1],
                            u=enu_point[2])
                        enu_point = enu.xyz2enu(refLong=origin[0],
                                                refLat=origin[1],
                                                refH=origin[2],
                                                X=ecef[0],
                                                Y=ecef[1],
                                                Z=ecef[2])
        #      else:
        #        enu_point = imageInfo['transformation'][0:3, 3];

                    dataBit = {
                        'filename': imageInfo['localName'],
                        'xyz': enu_point
                    }
                    data.append(dataBit)

                except Exception:  #some images may have no camera
                    pass

            #Make this a separate ingest process, making CAMERAS linked to the
            #images
            #data = arducopter.loadAdjTaggedMetadata(
            #    r'd:\visualsfm\2014-03-20 13-22-44_adj_tagged_images.txt');
            #Make this read the cameras from the DB instead
            #Write the gcp file once all camera positions are collected (the
            #original rewrote the file inside the loop on every iteration)
            writeGcpFile(data, gcpFilename)

        oid.lvcsOrigin = str(origin)
        oid.save()

        self.update_state(state='PROCESSING', meta={'stage': 'sparse SFM'})
        runSparse(matchFilename,
                  sparce_filename,
                  gcp=scene.geolocated,
                  shared=True,
                  logger=logger,
                  executable=visualsfm_exe)

        self.update_state(state='FINALIZE',
                          meta={'stage': 'loading resulting cameras'})

        #prevent bundle2scene from getting confused and crashing
        sift_data = os.path.join(processing_dir, 'sift_data')
        os.mkdir(sift_data)
        for filename in glob(os.path.join(processing_dir, '*.mat')) +\
                        glob(os.path.join(processing_dir, '*.sift')):
            shutil.move(filename, sift_data)

        if scene.geolocated:
            #Create a uscene.xml for the geolocated case. All I want out of this is
            #the bounding box and gsd calculation.
            boxm2_adaptor.bundle2scene(sparce_filename,
                                       processing_dir,
                                       isalign=False,
                                       out_dir="")

            cams = readNvm(path_join(processing_dir, 'sparse.nvm'))
            #cams.sort(key=lambda x:x.name)
            #Since the file names are frame_00001, etc... and you KNOW this order is
            #identical to localImageList, with some missing
            for cam in cams:
                frameName = cam.name
                #frame_00001, etc....
                imageInfo = filter(
                    lambda x: x['localName'].endswith(frameName),
                    localImageList)[0]
                #I have to use endswith instead of == because visual sfm APPARENTLY
                #decides to take some liberty and make absolute paths relative
                image = imageList[imageInfo['index']].history(history)

                (k, r, t) = cam.krt(width=image.imageWidth,
                                    height=image.imageHeight)
                logger.info('Origin is %s' % str(origin))
                llh_xyz = enu.enu2llh(lon_origin=origin[0],
                                      lat_origin=origin[1],
                                      h_origin=origin[2],
                                      east=cam.translation_xyz[0],
                                      north=cam.translation_xyz[1],
                                      up=cam.translation_xyz[2])

                grcs = models.GeoreferenceCoordinateSystem.create(
                    name='%s 0' % image.name,
                    xUnit='d',
                    yUnit='d',
                    zUnit='m',
                    location='SRID=4326;POINT(%0.15f %0.15f %0.15f)' %
                    (origin[0], origin[1], origin[2]),
                    service_id=self.request.id)
                grcs.save()
                cs = models.CartesianCoordinateSystem.create(
                    name='%s 1' % (image.name),
                    service_id=self.request.id,
                    xUnit='m',
                    yUnit='m',
                    zUnit='m')
                cs.save()

                transform = models.CartesianTransform.create(
                    name='%s 1_0' % (image.name),
                    service_id=self.request.id,
                    rodriguezX=Point(*r[0, :]),
                    rodriguezY=Point(*r[1, :]),
                    rodriguezZ=Point(*r[2, :]),
                    translation=Point(t[0][0], t[1][0], t[2][0]),
                    coordinateSystem_from_id=grcs.id,
                    coordinateSystem_to_id=cs.id)
                transform.save()

                camera = image.camera
                try:
                    camera.update(service_id=self.request.id,
                                  focalLengthU=k[0, 0],
                                  focalLengthV=k[1, 1],
                                  principalPointU=k[0, 2],
                                  principalPointV=k[1, 2],
                                  coordinateSystem=cs)
                except Exception:  #image has no camera yet; create one
                    camera = models.Camera.create(name=image.name,
                                                  service_id=self.request.id,
                                                  focalLengthU=k[0, 0],
                                                  focalLengthV=k[1, 1],
                                                  principalPointU=k[0, 2],
                                                  principalPointV=k[1, 2],
                                                  coordinateSystem=cs)
                    camera.save()
                    image.update(camera=camera)

            logger.info(str(cams[0]))
        else:
            from vsi.tools.natural_sort import natural_sorted
            from glob import glob

            from vsi.io.krt import Krt
            from voxel_globe.tools.camera import save_krt

            boxm2_adaptor.bundle2scene(sparce_filename,
                                       processing_dir,
                                       isalign=True,
                                       out_dir=processing_dir)
            #While the output dir is used for the b2s folders, uscene.xml is cwd
            #They are both set to processing_dir, so everything works out well
            aligned_cams = glob(os.path.join(processing_dir, 'cams_krt', '*'))
            #sort them naturally in case there are more than 99,999 files
            aligned_cams = natural_sorted(aligned_cams)
            if len(aligned_cams) != len(imageList):
                #Create a new image collection
                new_image_collection = models.ImageCollection.create(
                    name="SFM Result Subset (%s)" % image_collection.name,
                    service_id=self.request.id)
                #        for image in image_collection.images.all():
                #          new_image_collection.images.add(image)
                new_image_collection.save()

                frames_keep = set(
                    map(
                        lambda x: int(os.path.splitext(x.split('_')[-2])[0]) -
                        1, aligned_cams))

                for frame_index in frames_keep:
                    new_image_collection.images.add(imageList[frame_index])


#        frames_remove = set(xrange(len(imageList))) - frames_keep
#
#        for remove_index in list(frames_remove):
#          #The frame number refers to the nth image in the image collection,
#          #so frame_00100.tif is the 100th image, starting the index at one
#          #See local_name above
#
#          #remove the images sfm threw away
#          new_image_collection.remove(imageList[remove_index])
                image_collection = new_image_collection
                frames_keep = list(frames_keep)
            else:
                frames_keep = xrange(len(aligned_cams))

            #---Update the camera models in the database.---
            for camera_index, frame_index in enumerate(frames_keep):
                krt = Krt.load(aligned_cams[camera_index])
                image = imageList[frame_index].history(history)
                save_krt(self.request.id,
                         image,
                         krt.k,
                         krt.r,
                         krt.t, [0, 0, 0],
                         srid=4326)

            #---Update scene information important for the no-metadata case ---

        scene_filename = os.path.join(processing_dir, 'model', 'uscene.xml')
        boxm_scene = boxm2_scene_adaptor.boxm2_scene_adaptor(scene_filename)

        scene.bbox_min = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[0]
        scene.bbox_max = 'POINT(%0.15f %0.15f %0.15f)' % boxm_scene.bbox[1]

        #This is not a complete or good function really... but it will get me the
        #information I need.
        scene_dict = load_xml(scene_filename)
        block = scene_dict['block']

        scene.default_voxel_size='POINT(%f %f %f)' % \
            (float(block.at['dim_x']), float(block.at['dim_y']),
             float(block.at['dim_z']))
        scene.save()

    return oid.id
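
In the geolocated branch above, each camera center is recovered from the world-to-camera transformation [R|t] as C = -R^T t before being re-referenced to the shared scene origin through ECEF (the enu2xyz/xyz2enu round trip uses the project's voxel_globe.tools.enu helpers, exactly as shown). A minimal numpy sketch of just the recovery step:

import numpy as np

#A world-to-camera transform [R|t]: x_cam = R.dot(x_world) + t
transformation = np.array([[1.0, 0.0, 0.0, 10.0],
                           [0.0, 1.0, 0.0, -5.0],
                           [0.0, 0.0, 1.0,  2.0]])

r = transformation[0:3, 0:3]
t = transformation[0:3, 3:]

#The camera center satisfies R.dot(C) + t = 0, so C = -R^T t
enu_point = -r.transpose().dot(t)
print(enu_point.ravel())  #[-10.  5. -2.]
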
Code example #3
def run_build_voxel_model_bp(self,
                             image_collection_id,
                             scene_id,
                             bbox,
                             skip_frames,
                             cleanup=True,
                             history=None):
    from distutils.dir_util import remove_tree
    from shutil import move
    import random
    import os  #assumed to be imported at module scope in the original tasks.py

    from vsi.tools.redirect import StdRedirect, Logger as LoggerWrapper
    from voxel_globe.meta import models
    from voxel_globe.tools.camera import get_krt
    import voxel_globe.tools

    from boxm2_scene_adaptor import boxm2_scene_adaptor

    from vil_adaptor import load_image
    from vpgl_adaptor import load_perspective_camera
    from voxel_globe.tools.wget import download as wget

    from vsi.vxl.create_scene_xml import create_scene_xml

    from vsi.tools.dir_util import copytree

    #logger is assumed to be a module-level task logger defined in the
    #original tasks.py
    with StdRedirect(
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_out.log', 'w'),
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_err.log', 'w')):

        openclDevice = os.environ['VIP_OPENCL_DEVICE']
        opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
        if opencl_memory:
            opencl_memory = int(opencl_memory)

        scene = models.Scene.objects.get(id=scene_id)

        imageCollection = models.ImageCollection.objects.get(\
            id=image_collection_id).history(history)
        imageList = imageCollection.images.all()

        with voxel_globe.tools.task_dir('voxel_world') as processing_dir:

            logger.warning(bbox)

            if bbox['geolocated']:
                create_scene_xml(
                    openclDevice,
                    3,
                    float(bbox['voxel_size']),
                    lla1=(float(bbox['x_min']), float(bbox['y_min']),
                          float(bbox['z_min'])),
                    lla2=(float(bbox['x_max']), float(bbox['y_max']),
                          float(bbox['z_max'])),
                    origin=scene.origin,
                    model_dir='.',
                    number_bins=1,
                    output_file=open(os.path.join(processing_dir, 'scene.xml'),
                                     'w'),
                    n_bytes_gpu=opencl_memory)
            else:
                create_scene_xml(
                    openclDevice,
                    3,
                    float(bbox['voxel_size']),
                    lvcs1=(float(bbox['x_min']), float(bbox['y_min']),
                           float(bbox['z_min'])),
                    lvcs2=(float(bbox['x_max']), float(bbox['y_max']),
                           float(bbox['z_max'])),
                    origin=scene.origin,
                    model_dir='.',
                    number_bins=1,
                    output_file=open(os.path.join(processing_dir, 'scene.xml'),
                                     'w'),
                    n_bytes_gpu=opencl_memory)

            counter = 1

            imageNames = []
            cameraNames = []

            os.mkdir(os.path.join(processing_dir, 'local'))

            #Prepping
            for image in imageList:
                self.update_state(state='INITIALIZE',
                                  meta={
                                      'stage': 'image fetch',
                                      'i': counter,
                                      'total': len(imageList)
                                  })
                image = image.history(history)
                (K, R, T, o) = get_krt(image.history(history), history=history)

                krtName = os.path.join(processing_dir, 'local',
                                       'frame_%05d.krt' % counter)

                #Write the camera in the plain-text krt layout: 3x3 K, 3x3 R,
                #then T on one line (Python 2 print syntax)
                with open(krtName, 'w') as fid:
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                        K[2, 0], K[2, 1], K[2, 2])
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                        R[2, 0], R[2, 1], R[2, 2])

                    print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0],
                                                            T[2, 0])

                imageName = image.originalImageUrl
                extension = os.path.splitext(imageName)[1]
                localName = os.path.join(processing_dir, 'local',
                                         'frame_%05d%s' % (counter, extension))
                wget(imageName, localName, secret=True)

                counter += 1

                imageNames.append(localName)
                cameraNames.append(krtName)

            variance = 0.06

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene.xml"), openclDevice)

            current_level = 0

            loaded_imgs = []
            loaded_cams = []

            for i in range(0, len(imageNames), skip_frames):
                logger.debug("i: %d img name: %s cam name: %s", i,
                             imageNames[i], cameraNames[i])
                self.update_state(state='PRELOADING',
                                  meta={
                                      'stage': 'image load',
                                      'i': i,
                                      'total': len(imageNames)
                                  })
                img, ni, nj = load_image(imageNames[i])
                loaded_imgs.append(img)
                pcam = load_perspective_camera(cameraNames[i])
                loaded_cams.append(pcam)

            refine_cnt = 5
            refine_device = openclDevice[0:3]
            if refine_device == 'cpu':
                refine_device = 'cpp'

            #Each refinement pass updates the scene with every image/camera
            #pair in a fresh random order
            for rfk in range(0, refine_cnt, 1):
                pair = zip(loaded_imgs, loaded_cams)
                random.shuffle(pair)
                for idx, (img, cam) in enumerate(pair):
                    self.update_state(state='PROCESSING',
                                      meta={
                                          'stage': 'update',
                                          'i': rfk + 1,
                                          'total': refine_cnt,
                                          'image': idx + 1,
                                          'images': len(loaded_imgs)
                                      })
                    logger.debug("refine_cnt: %d, idx: %d", rfk, idx)
                    vxl_scene.update(cam,
                                     img,
                                     True,
                                     True,
                                     None,
                                     openclDevice[0:3],
                                     variance,
                                     tnear=1000.0,
                                     tfar=100000.0)

                for bp_index in range(2):
                    pass  #height map update?

                logger.debug("writing cache: %d", rfk)
                vxl_scene.write_cache()
                logger.debug("wrote cache: %d", rfk)

                if rfk < refine_cnt - 1:
                    self.update_state(state='PROCESSING',
                                      meta={
                                          'stage': 'refine',
                                          'i': rfk,
                                          'total': refine_cnt
                                      })
                    logger.debug("refining %d...", rfk)
                    vxl_scene.refine(0.3, refine_device)
                    vxl_scene.write_cache()

            #Clone scene.xml as scene_color.xml, swapping the grey appearance
            #model for an RGB one so a final pass can paint the model in color
            with open(os.path.join(processing_dir, "scene_color.xml"),
                      'w') as fid:
                lines = open(os.path.join(processing_dir, "scene.xml"),
                             'r').readlines()
                lines = [
                    line.replace('boxm2_mog3_grey', 'boxm2_gauss_rgb').replace(
                        'boxm2_num_obs', 'boxm2_num_obs_single')
                    for line in lines
                ]
                fid.writelines(lines)

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene_color.xml"), openclDevice)

            #Reuse the final shuffled (img, cam) pairing for the color pass
            for idx, (img, cam) in enumerate(pair):
                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'color_update',
                                      'i': rfk + 1,
                                      'total': refine_cnt,
                                      'image': idx + 1,
                                      'images': len(loaded_imgs)
                                  })
                logger.debug("color_paint idx: %d", idx)
                vxl_scene.update(cam,
                                 img,
                                 False,
                                 False,
                                 None,
                                 openclDevice[0:3],
                                 tnear=1000.0,
                                 tfar=100000.0)

            vxl_scene.write_cache()

            with voxel_globe.tools.storage_dir(
                    'voxel_world') as voxel_world_dir:
                copytree(processing_dir,
                         voxel_world_dir,
                         ignore=lambda x, y: ['images'])
                models.VoxelWorld.create(
                    name='%s world (%s)' %
                    (imageCollection.name, self.request.id),
                    origin=scene.origin,
                    directory=voxel_world_dir,
                    service_id=self.request.id).save()
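
The per-frame .krt files written by the loop above are plain text: a 3x3 intrinsic matrix K, a 3x3 rotation R, and the translation T on a single line, in that order. The Python 2 print >> syntax obscures the layout, so here is an equivalent sketch (write_krt is a hypothetical helper, not the project's writer) using numpy.savetxt:

import numpy as np

def write_krt(filename, K, R, T):
    #Write a camera in the 3x3 K / 3x3 R / 1x3 T text layout produced above
    with open(filename, 'w') as fid:
        np.savetxt(fid, K, fmt='%0.18f')
        fid.write('\n')
        np.savetxt(fid, R, fmt='%0.18f')
        fid.write('\n')
        np.savetxt(fid, T.reshape(1, 3), fmt='%0.18f')
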
Code example #4
File: tasks.py Project: andyneff/voxel-globe
def run_build_voxel_model(self, image_collection_id, scene_id, bbox, 
                          skip_frames, cleanup=True, history=None):
  from distutils.dir_util import remove_tree
  from shutil import move
  import random
  import os  #assumed to be imported at module scope in the original tasks.py
  import logging  #needed for logging.INFO / logging.WARNING below

  from vsi.tools.redirect import Redirect, Logger as LoggerWrapper
  from voxel_globe.meta import models
  from voxel_globe.tools.camera import get_krt
  import voxel_globe.tools

  from boxm2_scene_adaptor import boxm2_scene_adaptor

  from vil_adaptor import load_image
  from vpgl_adaptor import load_perspective_camera
  from voxel_globe.tools.wget import download as wget

  from vsi.vxl.create_scene_xml import create_scene_xml

  from vsi.tools.dir_util import copytree, mkdtemp

  #logger is assumed to be a module-level task logger defined in the
  #original tasks.py
  with Redirect(stdout_c=LoggerWrapper(logger, lvl=logging.INFO),
                stderr_c=LoggerWrapper(logger, lvl=logging.WARNING)):

    openclDevice = os.environ['VIP_OPENCL_DEVICE']
    opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
    if opencl_memory:
      opencl_memory = int(opencl_memory)  #environment values are strings

    scene = models.Scene.objects.get(id=scene_id)

    imageCollection = models.ImageCollection.objects.get(\
        id=image_collection_id).history(history)
    imageList = imageCollection.images.all()

    with voxel_globe.tools.task_dir('voxel_world') as processing_dir:

      logger.warning(bbox)

      if bbox['geolocated']:
        create_scene_xml(openclDevice, 3, float(bbox['voxel_size']), 
            lla1=(float(bbox['x_min']), float(bbox['y_min']), 
                  float(bbox['z_min'])), 
            lla2=(float(bbox['x_max']), float(bbox['y_max']), 
                  float(bbox['z_max'])),
            origin=scene.origin, model_dir='.', number_bins=1,
            output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
            n_bytes_gpu=opencl_memory)
      else:
        create_scene_xml(openclDevice, 3, float(bbox['voxel_size']), 
            lvcs1=(float(bbox['x_min']), float(bbox['y_min']), 
                   float(bbox['z_min'])), 
            lvcs2=(float(bbox['x_max']), float(bbox['y_max']), 
                   float(bbox['z_max'])),
            origin=scene.origin, model_dir='.', number_bins=1,
            output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
            n_bytes_gpu=opencl_memory)

      counter = 1
      
      imageNames = []
      cameraNames = []

      os.mkdir(os.path.join(processing_dir, 'local'))
      
      #Prepping
      for image in imageList:
        self.update_state(state='INITIALIZE', meta={'stage':'image fetch', 
                                                    'i':counter, 
                                                    'total':len(imageList)})
        image = image.history(history)
        (K,R,T,o) = get_krt(image.history(history), history=history)
        
        krtName = os.path.join(processing_dir, 'local', 'frame_%05d.krt' % counter)

        #Write the camera in the plain-text krt layout: 3x3 K, 3x3 R, then T
        with open(krtName, 'w') as fid:
          print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2],
              K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2])
          print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2],
              R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2])

          print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0])

        imageName = image.originalImageUrl
        extension = os.path.splitext(imageName)[1]
        localName = os.path.join(processing_dir, 'local',
                                 'frame_%05d%s' % (counter, extension))
        wget(imageName, localName, secret=True)

        counter += 1
      
        imageNames.append(localName)
        cameraNames.append(krtName)
        
      variance = 0.06

      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, "scene.xml"),
                                      openclDevice)

      current_level = 0

      loaded_imgs = []
      loaded_cams = []
    
      for i in range(0, len(imageNames), skip_frames):
        logger.debug("i: %d img name: %s cam name: %s", i, imageNames[i], 
                     cameraNames[i])
        self.update_state(state='PRELOADING', meta={'stage':'image load', 
                                                    'i':i, 
                                                    'total':len(imageNames)})
        img, ni, nj = load_image(imageNames[i])
        loaded_imgs.append(img)
        pcam = load_perspective_camera(cameraNames[i])
        loaded_cams.append(pcam)
    
      refine_cnt = 5
      #Each refinement pass updates the scene with every image/camera pair in
      #a fresh random order
      for rfk in range(0, refine_cnt, 1):
        pair = zip(loaded_imgs, loaded_cams)
        random.shuffle(pair)
        for idx, (img, cam) in enumerate(pair):
          self.update_state(state='PROCESSING', meta={'stage':'update', 
              'i':rfk+1, 'total':refine_cnt, 'image':idx+1, 
              'images':len(loaded_imgs)})
          logger.debug("refine_cnt: %d, idx: %d", rfk, idx)
          vxl_scene.update(cam, img, True, True, None, openclDevice[0:3],
                           variance, tnear=1000.0, tfar=100000.0)
    
        logger.debug("writing cache: %d", rfk)
        vxl_scene.write_cache()
        logger.debug("wrote cache: %d", rfk)
        
        if rfk < refine_cnt-1:
          self.update_state(state='PROCESSING', meta={'stage':'refine', 
                                                      'i':rfk, 
                                                      'total':refine_cnt})
          logger.debug("refining %d...", rfk)
          refine_device = openclDevice[0:3]
          if refine_device == 'cpu':
            refine_device = 'cpp'
          vxl_scene.refine(0.3, refine_device)
          vxl_scene.write_cache()

      
      #Copy the finished model into permanent storage and register it
      voxel_world_dir = mkdtemp(dir=os.environ['VIP_STORAGE_DIR'])
      copytree(processing_dir, voxel_world_dir, ignore=lambda x,y:['images'])
      models.VoxelWorld.create(
          name='%s world (%s)' % (imageCollection.name, self.request.id),
          origin=scene.origin,
          directory=voxel_world_dir,
          service_id=self.request.id).save()
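
All of these functions take self and report progress through self.update_state and self.request.id, which strongly suggests they are registered as bound Celery tasks in the original tasks.py. Assuming that registration (the decorator below is an assumption; it is not shown in the source), a caller would dispatch and monitor one like this:

#Hypothetical registration/dispatch sketch; the real decorator and Celery app
#configuration live elsewhere in voxel_globe
from celery import shared_task

@shared_task(bind=True)
def run_build_voxel_model(self, image_collection_id, scene_id, bbox,
                          skip_frames, cleanup=True, history=None):
    ...  #body as in the example above

result = run_build_voxel_model.delay(1, 2, {'geolocated': False}, 1)
print(result.state)  #custom states like 'INITIALIZE'/'PROCESSING' come from
                     #the self.update_state calls inside the task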