Пример #1
0
def height_map_error(self, image_id, history=None):
  
  import numpy as np

  import vpgl_adaptor
  
  from vsi.io.image import imread, GdalReader
  
  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.celery import Popen

  from voxel_globe.tools.wget import download as wget

  tie_points_yxz = []
  control_points_yxz = []

  image = models.Image.objects.get(id=image_id).history(history)

  with voxel_globe.tools.task_dir('height_map_error_calculation', cd=True) as processing_dir:
    wget(image.originalImageUrl, image.original_filename, secret=True)
    height_reader =  GdalReader(image.original_filename, autoload=True)
    transform = height_reader.object.GetGeoTransform()
    height = height_reader.raster()

  tie_point_ids = set([x for imagen in models.Image.objects.filter(
      objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list(
      'objectId', flat=True)])

  for tie_point_id in tie_point_ids:
    tie_point = models.TiePoint.objects.get(objectId=tie_point_id, newerVersion=None).history(history)

    if not tie_point.deleted:
      lla_xyz = models.ControlPoint.objects.get(objectId = tie_point.geoPoint.objectId, newerVersion=None).history(history).point.coords
      control_points_yxz.append([lla_xyz[x] for x in [1,0,2]])
      tie_points_yxz.append([transform[4]*(tie_point.point.coords[0]+0.5) + transform[5]*(tie_point.point.coords[1]+0.5) + transform[3],
                             transform[1]*(tie_point.point.coords[0]+0.5) + transform[2]*(tie_point.point.coords[1]+0.5) + transform[0],
                             height[tie_point.point.coords[1], tie_point.point.coords[0]]])

  origin_yxz = np.mean(np.array(control_points_yxz), axis=0)
  tie_points_local = []
  control_points_local = []
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1], origin_yxz[2], 'wgs84')

  for tie_point in tie_points_yxz:
    tie_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *tie_point))

  for control_point in control_points_yxz:
    control_points_local.append(vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_point))

  error = np.linalg.norm(np.array(tie_points_local)-np.array(control_points_local), axis=0)/(len(tie_points_local)**0.5)

  result={}
  result['error'] = list(error)
  result['horizontal_accuracy'] = 2.4477*0.5*(error[0]+error[1])
  result['vertical_accuracy'] = 1.96*error[2]

  return result
Пример #2
0
def make_order_3(request, image_collection_id, scene_id):
  """Render step 3 of the build-voxel-world order form.

  Looks up the image collection and scene, derives a scalar voxel size from
  the scene's default voxel size, and — for geolocated scenes — converts the
  local (LVCS) bounding box back to global lat/lon/alt before handing the
  values to the template.
  """
  import voxel_globe.tools.enu as enu
  from voxel_globe.tools.camera import get_llh
  import numpy as np

  image_collection = models.ImageCollection.objects.get(id=image_collection_id)
  image_list = image_collection.images.all()
  scene = models.Scene.objects.get(id=scene_id)

  geolocated = scene.geolocated

  # TODO: when the scene bounding box is unset (min == max), it could be
  # estimated from the camera positions instead (see bundle2scene).
  bbox_min = scene.bbox_min
  bbox_max = scene.bbox_max
  # Average of the three per-axis voxel dimensions.
  voxel_size = sum(scene.default_voxel_size.coords) / 3.0

  if geolocated:
    # Scene coordinates are local (LVCS); convert the box corners to global
    # coordinates for display. Note the (lat, lon) ordering of the API.
    from vpgl_adaptor import create_lvcs, convert_local_to_global_coordinates
    origin = scene.origin.coords
    lvcs = create_lvcs(origin[1], origin[0], origin[2], "wgs84")
    (bbox_min[1], bbox_min[0], bbox_min[2]) = convert_local_to_global_coordinates(
        lvcs, bbox_min[1], bbox_min[0], bbox_min[2])
    (bbox_max[1], bbox_max[0], bbox_max[2]) = convert_local_to_global_coordinates(
        lvcs, bbox_max[1], bbox_max[0], bbox_max[2])

  # Flatten the two corner points into the template's x/y/z min/max keys.
  bbox = {}
  for index, axis in enumerate('xyz'):
    bbox['%s_min' % axis] = bbox_min[index]
    bbox['%s_max' % axis] = bbox_max[index]

  return render(request, 'order/build_voxel_world/html/make_order_3.html',
                {'scene_id': scene_id, 'bbox': bbox, 'geolocated': geolocated,
                 'voxel_size': voxel_size,
                 'image_collection_id': image_collection_id})
Пример #3
0
def make_order_3(request, image_collection_id, scene_id):
  """Render the third page of the voxel-world build order wizard.

  Fetches the scene for ``scene_id``, computes a single voxel-size scalar,
  converts the bounding box from local to global coordinates when the scene
  is geolocated, and renders the order template with those values.
  """
  import voxel_globe.tools.enu as enu
  from voxel_globe.tools.camera import get_llh
  import numpy as np

  image_collection = models.ImageCollection.objects.get(id=image_collection_id)
  image_list = image_collection.images.all()
  scene = models.Scene.objects.get(id=scene_id)

  geolocated = scene.geolocated

  # TODO: if bbox_min == bbox_max the box is probably unset; a fallback that
  # derives it from camera llh positions was considered here.
  bbox_min, bbox_max = scene.bbox_min, scene.bbox_max
  voxel_size = sum(scene.default_voxel_size.coords) / 3.0  # mean edge length

  if geolocated:
    from vpgl_adaptor import create_lvcs, convert_local_to_global_coordinates
    # Build the LVCS at the scene origin; the adaptor takes (lat, lon, alt),
    # while the stored origin is (lon, lat, alt).
    origin = scene.origin.coords
    lvcs = create_lvcs(origin[1], origin[0], origin[2], "wgs84")
    for corner in (bbox_min, bbox_max):
      (corner[1], corner[0], corner[2]) = convert_local_to_global_coordinates(
          lvcs, corner[1], corner[0], corner[2])

  bbox = {'x_min': bbox_min[0], 'x_max': bbox_max[0],
          'y_min': bbox_min[1], 'y_max': bbox_max[1],
          'z_min': bbox_min[2], 'z_max': bbox_max[2]}

  context = {'scene_id': scene_id,
             'bbox': bbox,
             'geolocated': geolocated,
             'voxel_size': voxel_size,
             'image_collection_id': image_collection_id}
  return render(request, 'order/build_voxel_world/html/make_order_3.html',
                context)
Пример #4
0
def get_point_cloud(point_cloud_id, number_points=None, history=None):
  """Load a point cloud's ``error.ply`` and return displayable global points.

  point_cloud_id -- database id of the models.PointCloud
  number_points  -- if set, keep only the N points with the largest 'prob'
  history        -- optional history revision passed to .history()

  Returns a dict with 'latitude', 'longitude', 'altitude' (numpy arrays),
  'color' ('#rrggbb' strings), and 'le'/'ce' error fields (-1 per point when
  the ply file does not carry them).

  NOTE: this is Python 2 code (``print`` statement, list-returning ``map``).
  """
  from voxel_globe.meta import models
  from vpgl_adaptor import convert_local_to_global_coordinates_array, create_lvcs
  import os
  import numpy as np
  from plyfile import PlyData

  point_cloud = models.PointCloud.objects.get(id=point_cloud_id).history(history)

  # Origin is stored (lon, lat, alt); create_lvcs takes (lat, lon, alt).
  lvcs = create_lvcs(point_cloud.origin[1], point_cloud.origin[0], point_cloud.origin[2], 'wgs84')

  ply = PlyData.read(str(os.path.join(point_cloud.directory, 'error.ply')))
  data = ply.elements[0].data

  if number_points:
    try:
      # Keep the number_points entries with the highest probability.
      import heapq
      data = np.array(heapq.nlargest(number_points, ply.elements[0].data, 
                                     key=lambda x:x['prob']))
    except IndexError: #not a correctly formated ply file. HACK A CODE!
      #This is a hack-a-code for Tom's ply file
      # Reinterpret the raw records with the expected field layout, then swap
      # axes (y<->z with a sign flip) and color channels (blue<->green) to
      # match the convention used elsewhere.
      data = ply.elements[0].data.astype([('x', '<f4'), ('y', '<f4'), 
          ('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), 
          ('prob', '<f4')])
      import copy
      blah = copy.deepcopy(data['y'])
      data['y'] = data['z']
      data['z'] = -blah
      blah = copy.deepcopy(data['blue'])
      data['blue'] = data['green']
      data['green'] = blah

      # Synthesize a probability as an (offset) L1 distance from the
      # centroid, then invert so central points score highest.
      # NOTE(review): the -10/+30 offsets look dataset-specific — confirm.
      data['prob'] = abs(data['x'] - 10 - sum(data['x'])/len(data['x'])) \
                   + abs(data['y'] + 30 - sum(data['y'])/len(data['y'])) \
                   + abs(data['z'] - sum(data['z'])/len(data['z']))
      data['prob'] = max(data['prob']) - data['prob']

      data = np.array(heapq.nlargest(number_points, data, 
                                     key=lambda x:x['prob']))
      print data['prob']



  
  # Convert all local points to global (lat, lon, alt) in one call.
  lla = convert_local_to_global_coordinates_array(lvcs, data['x'].tolist(), data['y'].tolist(), data['z'].tolist());

  latitude = np.array(lla[0])
  longitude = np.array(lla[1])
  altitude = np.array(lla[2])
  # Hex color per point. NOTE(review): lambda names (r, b, g) don't match the
  # (red, green, blue) argument order — values map as r=red, b=green, g=blue,
  # so the output is '#%02x%02x%02x' % (red, blue, green); confirm intended.
  color = map(lambda r,b,g:'#%02x%02x%02x' % (r, g, b), data['red'], data['green'], data['blue'])

  return_data = {"latitude": latitude, "longitude": longitude,
                 "altitude": altitude, "color": color}

  # Structured-array field access raises ValueError for missing fields;
  # fall back to -1 sentinels when le/ce are absent.
  try:
    return_data['le'] = data['le']
  except ValueError:
    return_data['le'] = (-np.ones(len(latitude))).tolist()
  try:
    return_data['ce'] = data['ce']
  except ValueError:
    return_data['ce'] = (-np.ones(len(latitude))).tolist()

  return return_data
Пример #5
0
def tiepoint_error_calculation(self,
                               image_collection_id,
                               scene_id,
                               history=None):
    """Triangulate tie points with the current cameras and report the RMS
    error against their surveyed control points.

    Camera KRT matrices are written to per-frame text files, a BWM site file
    is generated, the external ``bwm_triangulate_2d_corrs`` tool is run, and
    the triangulated points are compared (in the scene's LVCS) against the
    control points.

    image_collection_id -- id of the models.ImageCollection to evaluate
    scene_id            -- id of the models.Scene providing the LVCS origin
    history             -- optional history revision

    Returns a dict with 'error' (per-axis RMS), 'horizontal_accuracy', and
    'vertical_accuracy'.

    NOTE: Python 2 code (``print >>``, ``iteritems``); relies on module-level
    ``os`` and ``logger``.
    """
    from PIL import Image
    import numpy as np

    import vpgl_adaptor

    from voxel_globe.meta import models
    import voxel_globe.tools
    from voxel_globe.tools.camera import get_krt
    from voxel_globe.tools.celery import Popen

    from voxel_globe.tools.xml_dict import load_xml

    self.update_state(state='INITIALIZE',
                      meta={
                          'id': image_collection_id,
                          'scene': scene_id
                      })

    image_collection = models.ImageCollection.objects.get(
        id=image_collection_id).history(history)

    control_points = {}

    # Gather, per frame, the tie points of every image version, grouped by
    # the control point they reference.
    for fr, image in enumerate(image_collection.images.all()):
        image = image.history(history)
        tiepoint_ids = set([
            x
            for imagen in models.Image.objects.filter(objectId=image.objectId)
            for x in imagen.tiepoint_set.all().values_list('objectId',
                                                           flat=True)
        ])
        for tiepoint_id in tiepoint_ids:
            tiepoint = models.TiePoint.objects.get(
                objectId=tiepoint_id, newerVersion=None).history(history)

            #demoware code hack!
            # Only control points whose name contains 'error' participate in
            # error calculation (the complement of tiepoint_registration).
            if not 'error' in tiepoint.geoPoint.name.lower():
                continue

            if not tiepoint.deleted:
                control_point_id = tiepoint.geoPoint.objectId
                if control_point_id not in control_points:
                    control_points[control_point_id] = {'tiepoints': {}}
                control_points[control_point_id]['tiepoints'][fr] = list(
                    tiepoint.point)
                # Stored (x, y, z); reorder to (y, x, z) i.e. lat-first.
                lla_xyz = models.ControlPoint.objects.get(
                    objectId=control_point_id,
                    newerVersion=None).history(history).point.coords
                control_points[control_point_id]['3d'] = [
                    lla_xyz[x] for x in [1, 0, 2]
                ]

    #filter only control points with more than 1 tiepoint
    control_points = {
        k: v
        for k, v in control_points.iteritems()
        if len(v['tiepoints'].keys()) > 1
    }

    # LVCS anchored at the scene origin (stored lon-first; create_lvcs takes
    # lat-first).
    origin_xyz = list(models.Scene.objects.get(id=scene_id).origin)
    lvcs = vpgl_adaptor.create_lvcs(origin_xyz[1], origin_xyz[0],
                                    origin_xyz[2], 'wgs84')
    for control_point in control_points:
        control_points[control_point][
            'lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(
                lvcs, *control_points[control_point]['3d'])

    images = {}

    with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
        # The site file format requires an image path; a 1x1 dummy suffices.
        dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
        img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
        img.save(dummy_imagename)
        #Thank you stupid site file

        # Dump each frame's K, R, T as whitespace-separated text files, the
        # camera format consumed by the triangulation tool.
        for fr, image in enumerate(image_collection.images.all()):
            (K, R, T, o) = get_krt(image.history(history), history=history)
            images[fr] = image.objectId

            with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                      'w') as fid:
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                    K[2, 0], K[2, 1], K[2, 2])
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                    R[2, 0], R[2, 1], R[2, 2])
                print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2,
                                                                            0])
        site_in_name = os.path.join(processing_dir, 'site.xml')
        site_out_name = os.path.join(processing_dir, 'site2.xml')
        # NOTE(review): the stray 've' after </Objects> in the template below
        # looks like a typo carried along from the original — confirm the
        # tool tolerates it before cleaning up.
        with open(site_in_name, 'w') as fid:
            fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>ve
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
            for control_point_index, control_point_id in enumerate(
                    control_points):
                fid.write('<Correspondence id="%d">\n' % control_point_index)
                for fr, tie_point in control_points[control_point_id][
                        'tiepoints'].iteritems():
                    fid.write('<CE fr="%d" u="%f" v="%f"/>\n' %
                              (fr, tie_point[0], tie_point[1]))
                fid.write('</Correspondence>\n')
                # Remember the correspondence index so results can be matched
                # back to control points after triangulation.
                control_points[control_point_id]['id'] = control_point_index
            fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')

        #triangulate the points
        Popen([
            'bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
            site_out_name
        ],
              logger=logger).wait()

        #Read in the result, and load into points_triangulate structure
        xml = load_xml(site_out_name)
        points_triangulate_id = []
        points_triangulate = []
        for correspondence in xml['Correspondences']['Correspondence']:
            points_triangulate_id.append(int(correspondence.at['id']))
            points_triangulate.append(
                (float(correspondence['corr_world_point'].at['X']),
                 float(correspondence['corr_world_point'].at['Y']),
                 float(correspondence['corr_world_point'].at['Z'])))

        #Read the points out of the control points structure, but make sure they are
        #in the same order (check id == point_id
        points_orig = []
        for point_id in points_triangulate_id:
            point = [
                v['lvcs'] for k, v in control_points.iteritems()
                if v['id'] == point_id
            ]
            points_orig.append(point[0])

    points_orig = np.array(points_orig)
    points_triangulate = np.array(points_triangulate)
    # axis=0 norm over points / sqrt(N): per-axis RMS error.
    error = np.linalg.norm((points_orig - points_triangulate),
                           axis=0) / (points_orig.shape[0]**0.5)

    result = {}
    result['error'] = list(error)
    # NOTE(review): 2.4477 and 1.96 appear to be 95%-confidence scale factors
    # (CE95/LE95) — confirm against the accuracy standard in use.
    result['horizontal_accuracy'] = 2.4477 * 0.5 * (error[0] + error[1])
    result['vertical_accuracy'] = 1.96 * error[2]

    return result
Пример #6
0
def tiepoint_registration(self, image_collection_id, history=None):
    from PIL import Image
    import numpy as np

    from django.contrib.gis import geos

    import vpgl_adaptor

    from vsi.io.krt import Krt

    from voxel_globe.meta import models
    import voxel_globe.tools
    from voxel_globe.tools.camera import get_krt, save_krt
    from voxel_globe.tools.celery import Popen

    from voxel_globe.tools.xml_dict import load_xml

    self.update_state(state='INITIALIZE', meta={'id': image_collection_id})

    image_collection = models.ImageCollection.objects.get(
        id=image_collection_id).history(history)

    control_points = {}

    for fr, image in enumerate(image_collection.images.all()):
        image = image.history(history)
        tiepoint_ids = set([
            x
            for imagen in models.Image.objects.filter(objectId=image.objectId)
            for x in imagen.tiepoint_set.all().values_list('objectId',
                                                           flat=True)
        ])
        for tiepoint_id in tiepoint_ids:
            tiepoint = models.TiePoint.objects.get(
                objectId=tiepoint_id, newerVersion=None).history(history)

            #demoware code hack!
            if 'error' in tiepoint.geoPoint.name.lower():
                continue

            if not tiepoint.deleted:
                control_point_id = tiepoint.geoPoint.objectId
                if control_point_id not in control_points:
                    control_points[control_point_id] = {'tiepoints': {}}
                control_points[control_point_id]['tiepoints'][fr] = list(
                    tiepoint.point)
                lla_xyz = models.ControlPoint.objects.get(
                    objectId=control_point_id,
                    newerVersion=None).history(history).point.coords
                control_points[control_point_id]['3d'] = [
                    lla_xyz[x] for x in [1, 0, 2]
                ]

    #filter only control points with more than 1 tiepoint
    control_points = {
        k: v
        for k, v in control_points.iteritems()
        if len(v['tiepoints'].keys()) > 1
    }

    origin_yxz = np.mean([v['3d'] for k, v in control_points.iteritems()],
                         axis=0)
    lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                    origin_yxz[2], 'wgs84')
    for control_point in control_points:
        control_points[control_point][
            'lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(
                lvcs, *control_points[control_point]['3d'])

    images = {}

    with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
        dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
        img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
        img.save(dummy_imagename)
        #Thank you stupid site file

        for fr, image in enumerate(image_collection.images.all()):
            (K, R, T, o) = get_krt(image.history(history), history=history)
            images[fr] = image.objectId

            with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                      'w') as fid:
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                    K[2, 0], K[2, 1], K[2, 2])
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                    R[2, 0], R[2, 1], R[2, 2])
                print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2,
                                                                            0])
        site_in_name = os.path.join(processing_dir, 'site.xml')
        site_out_name = os.path.join(processing_dir, 'site2.xml')
        with open(site_in_name, 'w') as fid:
            fid.write('''<BWM_VIDEO_SITE name="Triangulation">
  <videoSiteDir path="%s">
  </videoSiteDir>
  <videoPath path="%s">
  </videoPath>
  <cameraPath path="%s/*.txt">
  </cameraPath>
  <Objects>
  </Objects>ve
  <Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
            for control_point_index, control_point_id in enumerate(
                    control_points):
                fid.write('<Correspondence id="%d">\n' % control_point_index)
                for fr, tie_point in control_points[control_point_id][
                        'tiepoints'].iteritems():
                    fid.write('<CE fr="%d" u="%f" v="%f"/>\n' %
                              (fr, tie_point[0], tie_point[1]))
                fid.write('</Correspondence>\n')
                control_points[control_point_id]['id'] = control_point_index
            fid.write('''</Correspondences>
  </BWM_VIDEO_SITE>\n''')

        #triangulate the points
        Popen([
            'bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
            site_out_name
        ],
              logger=logger).wait()

        #Read in the result, and load into points_triangulate structure
        xml = load_xml(site_out_name)
        points_triangulate = {'id': [], 'x': [], 'y': [], 'z': []}
        for correspondence in xml['Correspondences']['Correspondence']:
            points_triangulate['id'].append(int(correspondence.at['id']))
            points_triangulate['x'].append(
                float(correspondence['corr_world_point'].at['X']))
            points_triangulate['y'].append(
                float(correspondence['corr_world_point'].at['Y']))
            points_triangulate['z'].append(
                float(correspondence['corr_world_point'].at['Z']))

        #Read the points out of the control points structure, but make sure they are
        #in the same order (check id == point_id
        points_orig = {'x': [], 'y': [], 'z': []}
        for point_id in points_triangulate['id']:
            point = [
                v['lvcs'] for k, v in control_points.iteritems()
                if v['id'] == point_id
            ]
            points_orig['x'].append(point[0][0])
            points_orig['y'].append(point[0][1])
            points_orig['z'].append(point[0][2])
        new_cameras = os.path.join(processing_dir, 'new_cameras')
        os.mkdir(new_cameras)

        #Make transformation
        transform, scale = vpgl_adaptor.compute_transformation(
            points_triangulate['x'], points_triangulate['y'],
            points_triangulate['z'], points_orig['x'], points_orig['y'],
            points_orig['z'], processing_dir, new_cameras)

        #calculate the new bounding box
        bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(
            list(image_collection.scene.bbox_min),
            list(image_collection.scene.bbox_max), transform)

        #calculate the new voxel size
        default_voxel_size = geos.Point(
            *(x * scale for x in image_collection.scene.default_voxel_size))

        scene = models.Scene.create(
            name=image_collection.scene.name + ' tiepoint registered',
            service_id=self.request.id,
            origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
            bbox_min=geos.Point(*bbox_min),
            bbox_max=geos.Point(*bbox_max),
            default_voxel_size=default_voxel_size,
            geolocated=True)
        scene.save()
        image_collection.scene = scene
        image_collection.save()

        for fr, image_id in images.iteritems():
            krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
            image = models.Image.objects.get(objectId=image_id,
                                             newerVersion=None)
            save_krt(self.request.id,
                     image,
                     krt.k,
                     krt.r,
                     krt.t, [origin_yxz[x] for x in [1, 0, 2]],
                     srid=4326)
Пример #7
0
def tiepoint_registration(self, image_collection_id, history=None):
  from PIL import Image
  import numpy as np

  from django.contrib.gis import geos

  import vpgl_adaptor

  from vsi.io.krt import Krt

  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt, save_krt
  from voxel_globe.tools.celery import Popen

  from voxel_globe.tools.xml_dict import load_xml
  
  self.update_state(state='INITIALIZE', meta={'id':image_collection_id})


  image_collection = models.ImageCollection.objects.get(id=image_collection_id).history(history)

  control_points = {}

  for fr,image in enumerate(image_collection.images.all()):
    image = image.history(history)
    tiepoint_ids = set([x for imagen in models.Image.objects.filter(objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(objectId=tiepoint_id, newerVersion=None).history(history)
      
      #demoware code hack!
      if 'error' in tiepoint.geoPoint.name.lower():
        continue
      
      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints':{}}
        control_points[control_point_id]['tiepoints'][fr] = list(tiepoint.point)
        lla_xyz = models.ControlPoint.objects.get(objectId = control_point_id, newerVersion=None).history(history).point.coords
        control_points[control_point_id]['3d'] = [lla_xyz[x] for x in [1,0,2]]

  #filter only control points with more than 1 tiepoint
  control_points = {k:v for k,v in control_points.iteritems() if len(v['tiepoints'].keys()) > 1}

  origin_yxz = np.mean([v['3d'] for k,v in control_points.iteritems()], axis=0)
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1], origin_yxz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_points[control_point]['3d'])

  images = {}

  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1,1], dtype=np.uint8))
    img.save(dummy_imagename)
    #Thank you stupid site file
      
    for fr,image in enumerate(image_collection.images.all()):
      (K,R,T,o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId

      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr), 'w') as fid:
        print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
            K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
        print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
            R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
        print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
  <videoSiteDir path="%s">
  </videoSiteDir>
  <videoPath path="%s">
  </videoPath>
  <cameraPath path="%s/*.txt">
  </cameraPath>
  <Objects>
  </Objects>ve
  <Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id]['tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0], tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
  </BWM_VIDEO_SITE>\n''')
    
    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name, '-out', site_out_name], logger=logger).wait()

    #Read in the result, and load into points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate = {'id':[], 'x':[], 'y':[], 'z':[]}
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate['id'].append(int(correspondence.at['id']))
      points_triangulate['x'].append(float(correspondence['corr_world_point'].at['X']))
      points_triangulate['y'].append(float(correspondence['corr_world_point'].at['Y']))
      points_triangulate['z'].append(float(correspondence['corr_world_point'].at['Z']))
      
    #Read the points out of the control points structure, but make sure they are 
    #in the same order (check id == point_id
    points_orig = {'x':[], 'y':[], 'z':[]}
    for point_id in points_triangulate['id']:
      point = [v['lvcs'] for k,v in control_points.iteritems() if v['id'] == point_id]
      points_orig['x'].append(point[0][0])
      points_orig['y'].append(point[0][1])
      points_orig['z'].append(point[0][2])
    new_cameras = os.path.join(processing_dir, 'new_cameras')
    os.mkdir(new_cameras)
    
    #Make transformation
    transform, scale = vpgl_adaptor.compute_transformation(points_triangulate['x'], points_triangulate['y'], points_triangulate['z'],
                                        points_orig['x'],points_orig['y'],points_orig['z'],
                                        processing_dir, new_cameras)

    #calculate the new bounding box
    bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(list(image_collection.scene.bbox_min), list(image_collection.scene.bbox_max), transform)
    
    #calculate the new voxel size
    default_voxel_size=geos.Point(*(x*scale for x in image_collection.scene.default_voxel_size))
    
    scene = models.Scene.create(name=image_collection.scene.name+' tiepoint registered', 
                        service_id=self.request.id,
                        origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
                        bbox_min=geos.Point(*bbox_min),
                        bbox_max=geos.Point(*bbox_max),
                        default_voxel_size=default_voxel_size,
                        geolocated=True)
    scene.save()
    image_collection.scene=scene
    image_collection.save()

    for fr, image_id in images.iteritems():
      krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
      image = models.Image.objects.get(objectId=image_id, newerVersion=None)
      save_krt(self.request.id, image, krt.k, krt.r, krt.t, [origin_yxz[x] for x in [1,0,2]], srid=4326)
Пример #8
0
def tiepoint_error_calculation(self, image_collection_id, scene_id, history=None):
  ''' Estimate the tiepoint registration error for an image collection.

      Triangulates a 3D position for every control point from its 2D
      tiepoint observations (via the external bwm_triangulate_2d_corrs tool)
      and compares the result against the surveyed control point coordinates,
      both expressed in the local vertical coordinate system (LVCS) centered
      at the scene origin.

      Arguments:
      image_collection_id - id of the ImageCollection to evaluate
      scene_id - id of the Scene whose origin defines the LVCS
      history - optional history point at which the models are evaluated

      Returns a dict with keys:
      error - per-axis RMS error [x, y, z] in local coordinates
      horizontal_accuracy - 2.4477 * mean of the x/y RMS errors
      vertical_accuracy - 1.96 * z RMS error
  '''
  from PIL import Image
  import numpy as np

  import vpgl_adaptor

  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt
  from voxel_globe.tools.celery import Popen

  from voxel_globe.tools.xml_dict import load_xml

  self.update_state(state='INITIALIZE', meta={'id':image_collection_id, 'scene':scene_id})

  image_collection = models.ImageCollection.objects.get(id=image_collection_id).history(history)

  control_points = {}

  #Collect, per control point, every 2D tiepoint observation keyed by frame
  #number, plus the surveyed 3D position
  for fr,image in enumerate(image_collection.images.all()):
    image = image.history(history)
    tiepoint_ids = set([x for imagen in models.Image.objects.filter(objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(objectId=tiepoint_id, newerVersion=None).history(history)
      
      #demoware code hack! Only control points whose name contains 'error'
      #participate in the calculation
      if not 'error' in tiepoint.geoPoint.name.lower():
        continue
      
      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints':{}}
        control_points[control_point_id]['tiepoints'][fr] = list(tiepoint.point)
        lla_xyz = models.ControlPoint.objects.get(objectId = control_point_id, newerVersion=None).history(history).point.coords
        #coords are stored lon,lat,alt; reorder to lat,lon,alt
        control_points[control_point_id]['3d'] = [lla_xyz[x] for x in [1,0,2]]

  #Only control points seen in more than one frame can be triangulated
  control_points = {k:v for k,v in control_points.iteritems() if len(v['tiepoints'].keys()) > 1}

  origin_xyz = list(models.Scene.objects.get(id=scene_id).origin)
  #create_lvcs takes lat,lon,alt; the scene origin is stored lon,lat,alt
  lvcs = vpgl_adaptor.create_lvcs(origin_xyz[1], origin_xyz[0], origin_xyz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_points[control_point]['3d'])

  images = {}

  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    #The site file requires a video path; satisfy it with a 1x1 dummy image
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1,1], dtype=np.uint8))
    img.save(dummy_imagename)
    #Thank you stupid site file
      
    #Write one KRT camera file per frame for the triangulation tool
    for fr,image in enumerate(image_collection.images.all()):
      (K,R,T,o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId

      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr), 'w') as fid:
        print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
            K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
        print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
            R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
        print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    #Write the site file describing the correspondences to triangulate.
    #BUGFIX: the original wrote '</Objects>ve', leaving stray text in the xml
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id]['tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0], tie_point[1]))
        fid.write('</Correspondence>\n')
        #Remember the correspondence id so results can be matched back up
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')
    
    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name, '-out', site_out_name], logger=logger).wait()

    #Read in the result, and load into the points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate_id=[]
    points_triangulate=[]
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate_id.append(int(correspondence.at['id']))
      points_triangulate.append((float(correspondence['corr_world_point'].at['X']),
                                 float(correspondence['corr_world_point'].at['Y']),
                                 float(correspondence['corr_world_point'].at['Z'])))
      
    #Read the points out of the control points structure, but make sure they
    #are in the same order (check id == point_id)
    points_orig = []
    for point_id in points_triangulate_id:
      point = [v['lvcs'] for k,v in control_points.iteritems() if v['id'] == point_id]
      points_orig.append(point[0])  

  #Per-axis RMS difference between triangulated and surveyed positions
  points_orig = np.array(points_orig)
  points_triangulate = np.array(points_triangulate)
  error = np.linalg.norm((points_orig-points_triangulate), axis=0)/(points_orig.shape[0]**0.5)

  result={}
  result['error'] = list(error)
  result['horizontal_accuracy'] = 2.4477*0.5*(error[0]+error[1])
  result['vertical_accuracy'] = 1.96*error[2]

  return result
Пример #9
0
def get_point_cloud(point_cloud_id, number_points=None, history=None):
    """Load a stored point cloud and return it in global (WGS84) coordinates.

    Reads the PLY file referenced by the PointCloud model, optionally keeps
    only the ``number_points`` entries with the largest 'prob' field, converts
    the local x/y/z coordinates to latitude/longitude/altitude through an
    LVCS centered at the cloud's origin, and returns a dict with keys
    "latitude", "longitude", "altitude", "color" (hex '#rrggbb' strings) and
    "le"/"ce" (filled with -1 per point when the PLY lacks those fields).
    """
    from voxel_globe.meta import models
    from vpgl_adaptor import convert_local_to_global_coordinates_array, create_lvcs
    import os
    import numpy as np
    from plyfile import PlyData

    point_cloud = models.PointCloud.objects.get(
        id=point_cloud_id).history(history)

    #origin is stored lon,lat,alt; create_lvcs takes lat,lon,alt
    lvcs = create_lvcs(point_cloud.origin[1], point_cloud.origin[0],
                       point_cloud.origin[2], 'wgs84')

    ply = PlyData.read(str(point_cloud.filename))

    data = ply.elements[0].data

    if number_points:
        try:
            import heapq
            #Keep only the number_points highest-probability points
            data = np.array(
                heapq.nlargest(number_points,
                               ply.elements[0].data,
                               key=lambda x: x['prob']))
        except IndexError:  #not a correctly formated ply file. HACK A CODE!
            #This is a hack-a-code for Tom's ply file
            #Force the expected field layout onto the raw records
            data = ply.elements[0].data.astype([('x', '<f4'), ('y', '<f4'),
                                                ('z', '<f4'), ('red', 'u1'),
                                                ('green', 'u1'),
                                                ('blue', 'u1'),
                                                ('prob', '<f4')])
            import copy
            #Swap axes: y takes z's values, z takes negated original y
            blah = copy.deepcopy(data['y'])
            data['y'] = data['z']
            data['z'] = -blah

            #Synthesize a 'prob' from distance to a hard-coded offset of the
            #centroid, inverted so closer points score higher
            data['prob'] = abs(data['x'] - 10 - sum(data['x'])/len(data['x'])) \
                         + abs(data['y'] + 30 - sum(data['y'])/len(data['y'])) \
                         + abs(data['z'] - sum(data['z'])/len(data['z']))
            data['prob'] = max(data['prob']) - data['prob']

            data = np.array(
                heapq.nlargest(number_points, data, key=lambda x: x['prob']))
            print data['prob']

    #Convert all local points to lat/lon/alt in one call
    lla = convert_local_to_global_coordinates_array(lvcs, data['x'].tolist(),
                                                    data['y'].tolist(),
                                                    data['z'].tolist())

    latitude = np.array(lla[0])
    longitude = np.array(lla[1])
    altitude = np.array(lla[2])
    color = map(lambda r, g, b: '#%02x%02x%02x' % (r, g, b), data['red'],
                data['green'], data['blue'])

    return_data = {
        "latitude": latitude,
        "longitude": longitude,
        "altitude": altitude,
        "color": color
    }

    #NOTE(review): relies on older numpy raising ValueError for a missing
    #structured-array field; newer numpy raises KeyError instead — verify
    #against the installed numpy version
    try:
        return_data['le'] = data['le']
    except ValueError:
        return_data['le'] = (-np.ones(len(latitude))).tolist()
    try:
        return_data['ce'] = data['ce']
    except ValueError:
        return_data['ce'] = (-np.ones(len(latitude))).tolist()

    return return_data
Пример #10
0
def height_map_error(self, image_id, history=None):
    """Measure how well a georeferenced height map agrees with its control
    points.

    Downloads the original image, maps each tiepoint's pixel coordinates
    through the image's GDAL geotransform (plus the sampled raster height)
    to a geodetic position, and compares it against the matching surveyed
    control point in a local vertical coordinate system (LVCS) centered at
    the mean control point location.

    Arguments:
    image_id - id of the height map Image
    history - optional history point at which the models are evaluated

    Returns a dict with keys:
    error - per-axis RMS error [x, y, z] in local coordinates
    horizontal_accuracy - 2.4477 * mean of the x/y RMS errors
    vertical_accuracy - 1.96 * z RMS error
    """

    import numpy as np

    import vpgl_adaptor

    #removed unused imports: vsi.io.image.imread, voxel_globe.tools.celery.Popen
    from vsi.io.image import GdalReader

    from voxel_globe.meta import models
    import voxel_globe.tools

    from voxel_globe.tools.wget import download as wget

    tie_points_yxz = []
    control_points_yxz = []

    image = models.Image.objects.get(id=image_id).history(history)

    with voxel_globe.tools.task_dir('height_map_error_calculation',
                                    cd=True) as processing_dir:
        wget(image.originalImageUrl, image.original_filename, secret=True)
        height_reader = GdalReader(image.original_filename, autoload=True)
        transform = height_reader.object.GetGeoTransform()
        height = height_reader.raster()

    #All tiepoints attached to any version of this image
    tie_point_ids = set([
        x for imagen in models.Image.objects.filter(objectId=image.objectId)
        for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)
    ])

    for tie_point_id in tie_point_ids:
        tie_point = models.TiePoint.objects.get(
            objectId=tie_point_id, newerVersion=None).history(history)

        if not tie_point.deleted:
            lla_xyz = models.ControlPoint.objects.get(
                objectId=tie_point.geoPoint.objectId,
                newerVersion=None).history(history).point.coords
            #coords are stored lon,lat,alt; reorder to lat,lon,alt
            control_points_yxz.append([lla_xyz[x] for x in [1, 0, 2]])
            #GDAL geotransform is (origin_x, pixel_w, row_rot, origin_y,
            #col_rot, pixel_h); +0.5 samples the pixel center. Entries are
            #y (lat), x (lon), then the raster height at that pixel
            tie_points_yxz.append([
                transform[4] * (tie_point.point.coords[0] + 0.5) +
                transform[5] * (tie_point.point.coords[1] + 0.5) +
                transform[3], transform[1] *
                (tie_point.point.coords[0] + 0.5) + transform[2] *
                (tie_point.point.coords[1] + 0.5) + transform[0],
                height[tie_point.point.coords[1], tie_point.point.coords[0]]
            ])

    #Center the LVCS on the mean control point position
    origin_yxz = np.mean(np.array(control_points_yxz), axis=0)
    tie_points_local = []
    control_points_local = []
    lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                    origin_yxz[2], 'wgs84')

    for tie_point in tie_points_yxz:
        tie_points_local.append(
            vpgl_adaptor.convert_to_local_coordinates2(lvcs, *tie_point))

    for control_point in control_points_yxz:
        control_points_local.append(
            vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_point))

    #Per-axis RMS difference between height map and control point positions
    error = np.linalg.norm(
        np.array(tie_points_local) - np.array(control_points_local),
        axis=0) / (len(tie_points_local)**0.5)

    result = {}
    result['error'] = list(error)
    result['horizontal_accuracy'] = 2.4477 * 0.5 * (error[0] + error[1])
    result['vertical_accuracy'] = 1.96 * error[2]

    return result