示例#1
0
def cameras_krt(request):
    """Django view: download per-image KRT camera files for an image
    collection as a zip archive.

    On a valid TiePointForm POST, returns an ``application/zip`` attachment
    containing one ``frame_<id>.txt`` per image (K, R and T matrices
    separated by blank lines) plus a ``scene.json`` describing the shared
    origin.  Any other request renders the download form.
    """
    if request.method == 'POST':
        form = forms.TiePointForm(request.POST)
        if form.is_valid():
            from StringIO import StringIO
            import math
            import json
            import zipfile

            import numpy as np

            from voxel_globe.tools.camera import get_krt

            image_collection = form.cleaned_data['image_collection']

            # Use the first image's origin for every camera so all KRTs
            # share one local coordinate system.
            _, _, _, origin = get_krt(
                image_collection.images.all()[0].history())
            # Zero-pad frame numbers to the width of the largest image id,
            # e.g. 'frame_%03d.txt' when the largest id has three digits.
            name_format = 'frame_%%0%dd.txt' % int(
                math.ceil(
                    math.log10(
                        max(image_collection.images.all().values_list(
                            'id', flat=True)))))
            zip_s = StringIO()
            with zipfile.ZipFile(zip_s, 'w', zipfile.ZIP_DEFLATED) as zipper:
                for image in image_collection.images.all():
                    k, r, t, _ = get_krt(image.history(), origin=origin)
                    krt_s = StringIO()
                    np.savetxt(krt_s, np.array(k))
                    krt_s.write('\n')
                    np.savetxt(krt_s, np.array(r))
                    krt_s.write('\n')
                    np.savetxt(krt_s, np.array(t).T)
                    zipper.writestr(name_format % image.id, krt_s.getvalue())
                zipper.writestr(
                    'scene.json',
                    json.dumps({
                        'origin': origin,
                        'longitude': origin[0],
                        'latitude': origin[1],
                        'altitude': origin[2]
                    }))

            response = HttpResponse(zip_s.getvalue(),
                                    content_type='application/zip')
            response['Content-Length'] = len(response.content)
            response['Content-Disposition'] = 'attachment; ' + \
                'filename=cameras_%d.zip' % image_collection.id
            return response

    else:
        form = forms.TiePointForm()

    return render(
        request, 'main/form.html', {
            'title': 'Voxel Globe - Download',
            'page_title':
            'Voxel Globe - Download Cameras for Image Collection',
            'form': form
        })
示例#2
0
def cameras_krt(request):
  """Django view: download KRT cameras for an image set as a zip archive.

  On a valid CameraForm POST, writes one ``frame_<id>.txt`` per image
  (K, R and T matrices separated by blank lines) plus a ``scene.json``
  describing the shared origin, and returns them as an attachment.
  Otherwise renders the download form.
  """
  if request.method == 'POST':
    form = forms.CameraForm(request.POST)
    if form.is_valid():
      from StringIO import StringIO
      import math
      import json
      import zipfile

      import numpy as np

      from voxel_globe.tools.camera import get_krt

      image_set = form.cleaned_data['image_set']
      # NOTE(review): raw form.data, not cleaned_data -- camera_set is the
      # submitted (uncleaned) value; confirm get_krt expects that.
      camera_set = form.data['camera_set']

      # Use the first image's origin so every camera shares one local
      # coordinate system.
      _,_,_,origin = get_krt(image_set.images.all()[0], camera_set)
      # Zero-pad frame numbers to the width of the largest image id.
      name_format = 'frame_%%0%dd.txt' % int(math.ceil(math.log10(max(image_set.images.all().values_list('id', flat=True)))))
      zip_s = StringIO()
      with zipfile.ZipFile(zip_s, 'w', zipfile.ZIP_DEFLATED) as zipper:
        for image in image_set.images.all():
          k,r,t,_ = get_krt(image, camera_set, origin=origin)
          krt_s = StringIO()
          np.savetxt(krt_s, np.array(k))
          krt_s.write('\n')
          np.savetxt(krt_s, np.array(r))
          krt_s.write('\n')
          np.savetxt(krt_s, np.array(t).T)
          zipper.writestr(name_format % image.id, krt_s.getvalue())
        zipper.writestr('scene.json', json.dumps({'origin':origin, 'longitude':origin[0], 'latitude':origin[1], 'altitude':origin[2]}))

      response = HttpResponse(zip_s.getvalue(), content_type='application/zip')
      response['Content-Length'] = len(response.content)
      response['Content-Disposition'] = 'attachment; ' + \
          'filename=cameras_%d.zip' % image_set.id
      return response

  else:
    form = forms.CameraForm()

  return render(request, 'main/form.html',
                {'form':form,
                 'action': '/download/cameras'})
示例#3
0
def tiepoint_error_calculation(self,
                               image_collection_id,
                               scene_id,
                               history=None):
    """Celery task: measure tie-point triangulation error for a collection.

    Gathers every control point whose name contains 'error' and that has
    tie points in at least two images, triangulates those image
    observations with the external ``bwm_triangulate_2d_corrs`` tool, and
    compares the triangulated positions against the surveyed control-point
    coordinates expressed in the scene's local vertical coordinate system
    (LVCS).

    Args:
        image_collection_id: id of the ImageCollection to evaluate.
        scene_id: id of the Scene whose origin defines the LVCS.
        history: optional history marker forwarded to ``.history()`` calls.

    Returns:
        dict with ``error`` (per-axis RMS, one value per coordinate),
        ``horizontal_accuracy`` (2.4477 * mean of first two RMS values)
        and ``vertical_accuracy`` (1.96 * third RMS value).  The scale
        factors are presumably CE90/LE95-style confidence multipliers --
        confirm against project conventions.
    """
    from PIL import Image
    import numpy as np

    import vpgl_adaptor

    from voxel_globe.meta import models
    import voxel_globe.tools
    from voxel_globe.tools.camera import get_krt
    from voxel_globe.tools.celery import Popen

    from voxel_globe.tools.xml_dict import load_xml

    self.update_state(state='INITIALIZE',
                      meta={
                          'id': image_collection_id,
                          'scene': scene_id
                      })

    image_collection = models.ImageCollection.objects.get(
        id=image_collection_id).history(history)

    # control-point objectId -> {'tiepoints': {frame: [u, v]},
    # '3d': reordered coords}, gathered across all versions of each image.
    control_points = {}

    for fr, image in enumerate(image_collection.images.all()):
        image = image.history(history)
        # Tie points from every version of this image (matched by
        # objectId), de-duplicated by the tie point's own objectId.
        tiepoint_ids = set([
            x
            for imagen in models.Image.objects.filter(objectId=image.objectId)
            for x in imagen.tiepoint_set.all().values_list('objectId',
                                                           flat=True)
        ])
        for tiepoint_id in tiepoint_ids:
            tiepoint = models.TiePoint.objects.get(
                objectId=tiepoint_id, newerVersion=None).history(history)

            #demoware code hack! Only control points whose name contains
            #'error' participate in the error calculation.
            if not 'error' in tiepoint.geoPoint.name.lower():
                continue

            if not tiepoint.deleted:
                control_point_id = tiepoint.geoPoint.objectId
                if control_point_id not in control_points:
                    control_points[control_point_id] = {'tiepoints': {}}
                control_points[control_point_id]['tiepoints'][fr] = list(
                    tiepoint.point)
                lla_xyz = models.ControlPoint.objects.get(
                    objectId=control_point_id,
                    newerVersion=None).history(history).point.coords
                # [1, 0, 2] swaps the first two coordinates; presumably
                # (lon, lat, alt) -> (lat, lon, alt), matching the
                # create_lvcs argument order below.  TODO confirm.
                control_points[control_point_id]['3d'] = [
                    lla_xyz[x] for x in [1, 0, 2]
                ]

    #filter only control points with more than 1 tiepoint (triangulation
    #needs at least two views)
    control_points = {
        k: v
        for k, v in control_points.iteritems()
        if len(v['tiepoints'].keys()) > 1
    }

    # Scene origin indices are swapped into create_lvcs; presumably the
    # stored order is (lon, lat, alt) while create_lvcs wants (lat, lon,
    # alt) -- confirm.
    origin_xyz = list(models.Scene.objects.get(id=scene_id).origin)
    lvcs = vpgl_adaptor.create_lvcs(origin_xyz[1], origin_xyz[0],
                                    origin_xyz[2], 'wgs84')
    for control_point in control_points:
        control_points[control_point][
            'lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(
                lvcs, *control_points[control_point]['3d'])

    # frame index -> image objectId, in the order cameras are written out.
    images = {}

    with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
        # The site file format requires an image path; a 1x1 dummy image
        # satisfies it without downloading real imagery.
        dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
        img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
        img.save(dummy_imagename)
        #Thank you stupid site file

        # Write one KRT camera file per frame for the triangulation tool.
        for fr, image in enumerate(image_collection.images.all()):
            (K, R, T, o) = get_krt(image.history(history), history=history)
            images[fr] = image.objectId

            with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                      'w') as fid:
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                    K[2, 0], K[2, 1], K[2, 2])
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                    R[2, 0], R[2, 1], R[2, 2])
                print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2,
                                                                            0])
        site_in_name = os.path.join(processing_dir, 'site.xml')
        site_out_name = os.path.join(processing_dir, 'site2.xml')
        # NOTE(review): the stray 've' after </Objects> below is reproduced
        # from the original template -- looks like a typo; confirm the bwm
        # tool tolerates it before removing.
        with open(site_in_name, 'w') as fid:
            fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>ve
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
            for control_point_index, control_point_id in enumerate(
                    control_points):
                fid.write('<Correspondence id="%d">\n' % control_point_index)
                for fr, tie_point in control_points[control_point_id][
                        'tiepoints'].iteritems():
                    fid.write('<CE fr="%d" u="%f" v="%f"/>\n' %
                              (fr, tie_point[0], tie_point[1]))
                fid.write('</Correspondence>\n')
                # Remember the correspondence id so results can be matched
                # back to control points after triangulation.
                control_points[control_point_id]['id'] = control_point_index
            fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')

        #triangulate the points
        Popen([
            'bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
            site_out_name
        ],
              logger=logger).wait()

        #Read in the result, and load into points_triangulate structure
        xml = load_xml(site_out_name)
        points_triangulate_id = []
        points_triangulate = []
        for correspondence in xml['Correspondences']['Correspondence']:
            points_triangulate_id.append(int(correspondence.at['id']))
            points_triangulate.append(
                (float(correspondence['corr_world_point'].at['X']),
                 float(correspondence['corr_world_point'].at['Y']),
                 float(correspondence['corr_world_point'].at['Z'])))

        #Read the points out of the control points structure, but make sure they are
        #in the same order (check id == point_id)
        points_orig = []
        for point_id in points_triangulate_id:
            point = [
                v['lvcs'] for k, v in control_points.iteritems()
                if v['id'] == point_id
            ]
            points_orig.append(point[0])

    # Per-axis RMS error: vector norm over the point axis (axis=0 of the
    # N x 3 arrays) divided by sqrt(N).
    points_orig = np.array(points_orig)
    points_triangulate = np.array(points_triangulate)
    error = np.linalg.norm((points_orig - points_triangulate),
                           axis=0) / (points_orig.shape[0]**0.5)

    result = {}
    result['error'] = list(error)
    result['horizontal_accuracy'] = 2.4477 * 0.5 * (error[0] + error[1])
    result['vertical_accuracy'] = 1.96 * error[2]

    return result
示例#4
0
def tiepoint_registration(self, image_collection_id, history=None):
    """Celery task: georegister an image collection via manual tie points.

    Triangulates tie-point correspondences with the external
    ``bwm_triangulate_2d_corrs`` tool (skipping control points named with
    'error', which are reserved for accuracy evaluation), computes a
    transform mapping the triangulated positions onto the surveyed control
    points in a local vertical coordinate system (LVCS), applies it to the
    scene bounding box and voxel size, then saves a new geolocated Scene
    and the transformed KRT cameras for every image.

    Args:
        image_collection_id: id of the ImageCollection to register.
        history: optional history marker forwarded to ``.history()`` calls.
    """
    from PIL import Image
    import numpy as np

    from django.contrib.gis import geos

    import vpgl_adaptor

    from vsi.io.krt import Krt

    from voxel_globe.meta import models
    import voxel_globe.tools
    from voxel_globe.tools.camera import get_krt, save_krt
    from voxel_globe.tools.celery import Popen

    from voxel_globe.tools.xml_dict import load_xml

    self.update_state(state='INITIALIZE', meta={'id': image_collection_id})

    image_collection = models.ImageCollection.objects.get(
        id=image_collection_id).history(history)

    # control-point objectId -> {'tiepoints': {frame: [u, v]},
    # '3d': reordered coords}.
    control_points = {}

    for fr, image in enumerate(image_collection.images.all()):
        image = image.history(history)
        # Tie points from every version of this image (matched by
        # objectId), de-duplicated by the tie point's own objectId.
        tiepoint_ids = set([
            x
            for imagen in models.Image.objects.filter(objectId=image.objectId)
            for x in imagen.tiepoint_set.all().values_list('objectId',
                                                           flat=True)
        ])
        for tiepoint_id in tiepoint_ids:
            tiepoint = models.TiePoint.objects.get(
                objectId=tiepoint_id, newerVersion=None).history(history)

            #demoware code hack! Control points named 'error' are reserved
            #for the error-calculation task and skipped here.
            if 'error' in tiepoint.geoPoint.name.lower():
                continue

            if not tiepoint.deleted:
                control_point_id = tiepoint.geoPoint.objectId
                if control_point_id not in control_points:
                    control_points[control_point_id] = {'tiepoints': {}}
                control_points[control_point_id]['tiepoints'][fr] = list(
                    tiepoint.point)
                lla_xyz = models.ControlPoint.objects.get(
                    objectId=control_point_id,
                    newerVersion=None).history(history).point.coords
                # [1, 0, 2] swaps the first two coordinates; presumably
                # (lon, lat, alt) -> (lat, lon, alt).  TODO confirm.
                control_points[control_point_id]['3d'] = [
                    lla_xyz[x] for x in [1, 0, 2]
                ]

    #filter only control points with more than 1 tiepoint (triangulation
    #needs at least two views)
    control_points = {
        k: v
        for k, v in control_points.iteritems()
        if len(v['tiepoints'].keys()) > 1
    }

    # Use the mean control-point position as the LVCS origin.
    origin_yxz = np.mean([v['3d'] for k, v in control_points.iteritems()],
                         axis=0)
    lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1],
                                    origin_yxz[2], 'wgs84')
    for control_point in control_points:
        control_points[control_point][
            'lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(
                lvcs, *control_points[control_point]['3d'])

    # frame index -> image objectId, in the order cameras are written out.
    images = {}

    with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
        # The site file format requires an image path; a 1x1 dummy image
        # satisfies it without downloading real imagery.
        dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
        img = Image.fromarray(np.empty([1, 1], dtype=np.uint8))
        img.save(dummy_imagename)
        #Thank you stupid site file

        # Write one KRT camera file per frame for the triangulation tool.
        for fr, image in enumerate(image_collection.images.all()):
            (K, R, T, o) = get_krt(image.history(history), history=history)
            images[fr] = image.objectId

            with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr),
                      'w') as fid:
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                    K[2, 0], K[2, 1], K[2, 2])
                print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                    R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                    R[2, 0], R[2, 1], R[2, 2])
                print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0], T[2,
                                                                            0])
        site_in_name = os.path.join(processing_dir, 'site.xml')
        site_out_name = os.path.join(processing_dir, 'site2.xml')
        # NOTE(review): the stray 've' after </Objects> below is reproduced
        # from the original template -- looks like a typo; confirm the bwm
        # tool tolerates it before removing.
        with open(site_in_name, 'w') as fid:
            fid.write('''<BWM_VIDEO_SITE name="Triangulation">
  <videoSiteDir path="%s">
  </videoSiteDir>
  <videoPath path="%s">
  </videoPath>
  <cameraPath path="%s/*.txt">
  </cameraPath>
  <Objects>
  </Objects>ve
  <Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
            for control_point_index, control_point_id in enumerate(
                    control_points):
                fid.write('<Correspondence id="%d">\n' % control_point_index)
                for fr, tie_point in control_points[control_point_id][
                        'tiepoints'].iteritems():
                    fid.write('<CE fr="%d" u="%f" v="%f"/>\n' %
                              (fr, tie_point[0], tie_point[1]))
                fid.write('</Correspondence>\n')
                # Remember the correspondence id so results can be matched
                # back to control points after triangulation.
                control_points[control_point_id]['id'] = control_point_index
            fid.write('''</Correspondences>
  </BWM_VIDEO_SITE>\n''')

        #triangulate the points
        Popen([
            'bwm_triangulate_2d_corrs', '-site', site_in_name, '-out',
            site_out_name
        ],
              logger=logger).wait()

        #Read in the result, and load into points_triangulate structure
        xml = load_xml(site_out_name)
        points_triangulate = {'id': [], 'x': [], 'y': [], 'z': []}
        for correspondence in xml['Correspondences']['Correspondence']:
            points_triangulate['id'].append(int(correspondence.at['id']))
            points_triangulate['x'].append(
                float(correspondence['corr_world_point'].at['X']))
            points_triangulate['y'].append(
                float(correspondence['corr_world_point'].at['Y']))
            points_triangulate['z'].append(
                float(correspondence['corr_world_point'].at['Z']))

        #Read the points out of the control points structure, but make sure they are
        #in the same order (check id == point_id)
        points_orig = {'x': [], 'y': [], 'z': []}
        for point_id in points_triangulate['id']:
            point = [
                v['lvcs'] for k, v in control_points.iteritems()
                if v['id'] == point_id
            ]
            points_orig['x'].append(point[0][0])
            points_orig['y'].append(point[0][1])
            points_orig['z'].append(point[0][2])
        new_cameras = os.path.join(processing_dir, 'new_cameras')
        os.mkdir(new_cameras)

        #Make transformation: map triangulated points onto the surveyed
        #(LVCS) control points; also writes transformed cameras into
        #new_cameras.
        transform, scale = vpgl_adaptor.compute_transformation(
            points_triangulate['x'], points_triangulate['y'],
            points_triangulate['z'], points_orig['x'], points_orig['y'],
            points_orig['z'], processing_dir, new_cameras)

        #calculate the new bounding box
        bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(
            list(image_collection.scene.bbox_min),
            list(image_collection.scene.bbox_max), transform)

        #calculate the new voxel size (scaled by the transform's scale)
        default_voxel_size = geos.Point(
            *(x * scale for x in image_collection.scene.default_voxel_size))

        # New geolocated scene.  origin_yxz indices are swapped back for
        # geos.Point; presumably (lat, lon, alt) -> (lon, lat, alt).
        scene = models.Scene.create(
            name=image_collection.scene.name + ' tiepoint registered',
            service_id=self.request.id,
            origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
            bbox_min=geos.Point(*bbox_min),
            bbox_max=geos.Point(*bbox_max),
            default_voxel_size=default_voxel_size,
            geolocated=True)
        scene.save()
        image_collection.scene = scene
        image_collection.save()

        # Persist the transformed cameras against the latest image versions.
        for fr, image_id in images.iteritems():
            krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
            image = models.Image.objects.get(objectId=image_id,
                                             newerVersion=None)
            save_krt(self.request.id,
                     image,
                     krt.k,
                     krt.r,
                     krt.t, [origin_yxz[x] for x in [1, 0, 2]],
                     srid=4326)
示例#5
0
def run_build_voxel_model_bp(self,
                             image_collection_id,
                             scene_id,
                             bbox,
                             skip_frames,
                             cleanup=True,
                             history=None):
    """Celery task: build a colored boxm2 voxel model from image data.

    Writes a scene.xml for the requested bounding box, downloads every
    image with its KRT camera, then alternates update and refine passes on
    the configured OpenCL device before a final color-update pass, storing
    the result as a new VoxelWorld record.

    Args:
        image_collection_id: id of the source ImageCollection.
        scene_id: id of the Scene supplying the model origin.
        bbox: dict with 'geolocated', 'voxel_size' and x/y/z min/max keys
            describing the modeling volume.
        skip_frames: stride used when sub-sampling images for the update
            passes.
        cleanup: accepted for API compatibility; not referenced in this
            body.
        history: optional history marker for versioned lookups.
    """
    from distutils.dir_util import remove_tree
    from shutil import move
    import random

    from vsi.tools.redirect import StdRedirect, Logger as LoggerWrapper
    from voxel_globe.meta import models
    from voxel_globe.tools.camera import get_krt
    import voxel_globe.tools

    from boxm2_scene_adaptor import boxm2_scene_adaptor

    from vil_adaptor import load_image
    from vpgl_adaptor import load_perspective_camera
    from voxel_globe.tools.wget import download as wget

    from vsi.vxl.create_scene_xml import create_scene_xml

    from vsi.tools.dir_util import copytree

    # Mirror stdout/stderr into per-task log files.
    with StdRedirect(
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_out.log', 'w'),
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_err.log', 'w')):

        # OpenCL device and (optional) memory budget come from the
        # environment.
        openclDevice = os.environ['VIP_OPENCL_DEVICE']
        opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
        if opencl_memory:
            opencl_memory = int(opencl_memory)

        scene = models.Scene.objects.get(id=scene_id)

        imageCollection = models.ImageCollection.objects.get(\
            id=image_collection_id).history(history)
        imageList = imageCollection.images.all()

        with voxel_globe.tools.task_dir('voxel_world') as processing_dir:

            logger.warning(bbox)

            # Geolocated boxes are passed as lla* corners; otherwise the
            # box is in local coordinates (lvcs*).
            if bbox['geolocated']:
                create_scene_xml(
                    openclDevice,
                    3,
                    float(bbox['voxel_size']),
                    lla1=(float(bbox['x_min']), float(bbox['y_min']),
                          float(bbox['z_min'])),
                    lla2=(float(bbox['x_max']), float(bbox['y_max']),
                          float(bbox['z_max'])),
                    origin=scene.origin,
                    model_dir='.',
                    number_bins=1,
                    output_file=open(os.path.join(processing_dir, 'scene.xml'),
                                     'w'),
                    n_bytes_gpu=opencl_memory)
            else:
                create_scene_xml(
                    openclDevice,
                    3,
                    float(bbox['voxel_size']),
                    lvcs1=(float(bbox['x_min']), float(bbox['y_min']),
                           float(bbox['z_min'])),
                    lvcs2=(float(bbox['x_max']), float(bbox['y_max']),
                           float(bbox['z_max'])),
                    origin=scene.origin,
                    model_dir='.',
                    number_bins=1,
                    output_file=open(os.path.join(processing_dir, 'scene.xml'),
                                     'w'),
                    n_bytes_gpu=opencl_memory)

            counter = 1

            imageNames = []
            cameraNames = []

            os.mkdir(os.path.join(processing_dir, 'local'))

            #Prepping: write one KRT file per image and download the image
            #itself into the local working directory.
            for image in imageList:
                self.update_state(state='INITIALIZE',
                                  meta={
                                      'stage': 'image fetch',
                                      'i': counter,
                                      'total': len(imageList)
                                  })
                image = image.history(history)
                # NOTE(review): history is applied twice (here and on the
                # line above) -- presumably idempotent; confirm.
                (K, R, T, o) = get_krt(image.history(history), history=history)

                krtName = os.path.join(processing_dir, 'local',
                                       'frame_%05d.krt' % counter)

                with open(krtName, 'w') as fid:
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                        K[2, 0], K[2, 1], K[2, 2])
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                        R[2, 0], R[2, 1], R[2, 2])

                    print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0],
                                                            T[2, 0])

                imageName = image.originalImageUrl
                extension = os.path.splitext(imageName)[1]
                localName = os.path.join(processing_dir, 'local',
                                         'frame_%05d%s' % (counter, extension))
                wget(imageName, localName, secret=True)

                counter += 1

                imageNames.append(localName)
                cameraNames.append(krtName)

            variance = 0.06

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene.xml"), openclDevice)

            current_level = 0

            # Load every skip_frames-th image and its camera into memory.
            loaded_imgs = []
            loaded_cams = []

            for i in range(0, len(imageNames), skip_frames):
                logger.debug("i: %d img name: %s cam name: %s", i,
                             imageNames[i], cameraNames[i])
                self.update_state(state='PRELOADING',
                                  meta={
                                      'stage': 'image load',
                                      'i': i,
                                      'total': len(imageNames)
                                  })
                img, ni, nj = load_image(imageNames[i])
                loaded_imgs.append(img)
                pcam = load_perspective_camera(cameraNames[i])
                loaded_cams.append(pcam)

            refine_cnt = 5
            # 'cpu' OpenCL devices refine via the C++ code path instead.
            refine_device = openclDevice[0:3]
            if refine_device == 'cpu':
                refine_device = 'cpp'

            # Alternate update passes (in shuffled image order) with
            # refinement between rounds.
            for rfk in range(0, refine_cnt, 1):
                pair = zip(loaded_imgs, loaded_cams)
                random.shuffle(pair)
                for idx, (img, cam) in enumerate(pair):
                    self.update_state(state='PROCESSING',
                                      meta={
                                          'stage': 'update',
                                          'i': rfk + 1,
                                          'total': refine_cnt,
                                          'image': idx + 1,
                                          'images': len(loaded_imgs)
                                      })
                    logger.debug("refine_cnt: %d, idx: %d", rfk, idx)
                    vxl_scene.update(cam,
                                     img,
                                     True,
                                     True,
                                     None,
                                     openclDevice[0:3],
                                     variance,
                                     tnear=1000.0,
                                     tfar=100000.0)

                for bp_index in range(2):
                    pass  #height map update?

                logger.debug("writing cache: %d", rfk)
                vxl_scene.write_cache()
                logger.debug("wrote cache: %d", rfk)

                # Refine between rounds, but not after the last one.
                if rfk < refine_cnt - 1:
                    self.update_state(state='PROCESSING',
                                      meta={
                                          'stage': 'refine',
                                          'i': rfk,
                                          'total': refine_cnt
                                      })
                    logger.debug("refining %d...", rfk)
                    vxl_scene.refine(0.3, refine_device)
                    vxl_scene.write_cache()

            # Clone scene.xml with RGB appearance model names for the
            # color pass.
            with open(os.path.join(processing_dir, "scene_color.xml"),
                      'w') as fid:
                lines = open(os.path.join(processing_dir, "scene.xml"),
                             'r').readlines()
                lines = [
                    line.replace('boxm2_mog3_grey', 'boxm2_gauss_rgb').replace(
                        'boxm2_num_obs', 'boxm2_num_obs_single')
                    for line in lines
                ]
                fid.writelines(lines)

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene_color.xml"), openclDevice)

            # Color-update pass; deliberately reuses `pair` and `rfk` from
            # the final refinement round above.
            for idx, (img, cam) in enumerate(pair):
                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'color_update',
                                      'i': rfk + 1,
                                      'total': refine_cnt,
                                      'image': idx + 1,
                                      'images': len(loaded_imgs)
                                  })
                logger.debug("color_paint idx: %d", idx)
                vxl_scene.update(cam,
                                 img,
                                 False,
                                 False,
                                 None,
                                 openclDevice[0:3],
                                 tnear=1000.0,
                                 tfar=100000.0)

            vxl_scene.write_cache()

            # Copy the finished model (minus raw images) into permanent
            # storage and register it as a VoxelWorld.
            with voxel_globe.tools.storage_dir(
                    'voxel_world') as voxel_world_dir:
                copytree(processing_dir,
                         voxel_world_dir,
                         ignore=lambda x, y: ['images'])
                models.VoxelWorld.create(
                    name='%s world (%s)' %
                    (imageCollection.name, self.request.id),
                    origin=scene.origin,
                    directory=voxel_world_dir,
                    service_id=self.request.id).save()
示例#6
0
def generate_error_point_cloud(self, voxel_world_id, prob=0.5, history=None):
  """Celery task: generate an error (covariance) point cloud for a world.

  For every image in the collection that produced the voxel world, renders
  a depth image on the GPU, casts 3-D points with per-ray covariance into
  the CPU scene cache, batch-computes point positions/covariances, and
  exports them as a PLY file plus a Potree-converted point cloud recorded
  as a PointCloud model.  Large per-image intermediate caches are removed
  afterwards.

  Args:
    voxel_world_id: id of the VoxelWorld to process.
    prob: probability threshold forwarded to ``gen_error_point_cloud``
      (the original body ignored this argument and hard-coded 0.5).
    history: optional history marker for versioned image lookups.
  """
  from glob import glob
  import json

  import numpy as np

  from boxm2_adaptor import load_cpp, render_depth, cast_3d_point, \
                            cast_3d_point_pass2, write_cache, \
                            create_stream_cache
  from boxm2_scene_adaptor import boxm2_scene_adaptor
  from vpgl_adaptor import create_perspective_camera_krt, persp2gen, \
                           compute_direction_covariance
  from boxm2_mesh_adaptor import batch_compute_3d_points, gen_error_point_cloud

  from vsi.tools.redirect import Redirect, Logger as LoggerWrapper

  import voxel_globe.tools
  import voxel_globe.meta.models as models
  from voxel_globe.tools.camera import get_krt

  # Route native stdout/stderr through the task logger.
  with Redirect(stdout_c=LoggerWrapper(logger, lvl=logging.INFO),
                stderr_c=LoggerWrapper(logger, lvl=logging.WARNING)):

    self.update_state(state='SETUP', meta={'pid':os.getpid()})

    # Recover the image collection and scene from the inputs of the service
    # call that originally built this voxel world.
    voxel_world = models.VoxelWorld.objects.get(id=voxel_world_id)
    service_inputs = json.loads(voxel_world.service.inputs)
    image_collection = models.ImageCollection.objects.get(
        id=service_inputs[0][0])
    images = image_collection.images.all()
    scene = models.Scene.objects.get(id=service_inputs[0][1])

    voxel_world_dir = voxel_world.directory

    scene_filename = os.path.join(voxel_world_dir, 'scene.xml')

    # Two adaptors over the same scene: GPU for depth rendering, CPU for
    # point casting and accumulation.
    opencl_device = os.environ['VIP_OPENCL_DEVICE']
    scene_gpu = boxm2_scene_adaptor(scene_filename, opencl_device)
    scene_cpp = boxm2_scene_adaptor(scene_filename, 'cpp')

    type_id_fname = "type_names_list.txt"
    image_id_fname = "image_list.txt"

    std_dev_angle = 0.1
    cov_c_path = 'cov_c.txt'
    # Camera-center covariance; the leading 0* zeroes it out, leaving only
    # the per-ray direction covariance in play (drop the 0* to restore the
    # 0.8**2 diagonal).
    cov_c = 0*np.eye(3)*0.8**2

    with voxel_globe.tools.task_dir('generate_error_point_cloud', cd=True) \
         as processing_dir:
      np.savetxt(cov_c_path, cov_c)

      for index, image in enumerate(images):
        self.update_state(state='PROCESSING', 
                          meta={'stage':'casting', 'image':index+1, 
                                'total':len(images)})

        k,r,t,o = get_krt(image.history(history))

        perspective_camera = create_perspective_camera_krt(k, r, t)

        # Depth and variance images rendered on the GPU for this view.
        (depth_image, variance_image, _) = render_depth(scene_gpu.scene, 
              scene_gpu.opencl_cache, perspective_camera, 
              image.imageWidth, image.imageHeight, 
              scene_gpu.device)

        cov_v_path = 'cov_%06d.txt' % index
        appearance_model = 'image_%06d' % index

        generic_camera = persp2gen(perspective_camera, image.imageWidth, 
                                   image.imageHeight)

        compute_direction_covariance(perspective_camera, std_dev_angle, 
                                     cov_v_path)

        # Two-pass 3-D point casting into the CPU cache.
        cast_3d_point(scene_cpp.scene,scene_cpp.cpu_cache,perspective_camera,
                      generic_camera,depth_image,variance_image,appearance_model)
        cast_3d_point_pass2(scene_cpp.scene,scene_cpp.cpu_cache,generic_camera,
                            appearance_model,cov_c_path,cov_v_path)

        write_cache(scene_cpp.cpu_cache, 1)

      self.update_state(state='PROCESSING', 
                          meta={'stage':'compute error'})

      # Manifest files consumed by create_stream_cache below.
      with open(image_id_fname, 'w') as fid:
        print >>fid, len(images)
        for index, image in enumerate(images):
          print >>fid, 'image_%06d' % (index)

      with open(type_id_fname,"w") as fid:
        print >>fid, 2
        print >>fid, "boxm2_point"
        print >>fid, "boxm2_covariance"

      mem=3.0
      stream_cache = create_stream_cache(scene_cpp.scene, type_id_fname, 
                                         image_id_fname, mem)
      batch_compute_3d_points(scene_cpp.scene, scene_cpp.cpu_cache, stream_cache)

      self.update_state(state='EXPORTING', 
                          meta={'stage':'ply'})

      with voxel_globe.tools.storage_dir('generate_error_point_cloud') \
           as storage_dir:
        ply_filename = os.path.join(storage_dir, 'model.ply')
        # Pass the caller-supplied probability threshold (was hard-coded
        # to 0.5, silently ignoring the ``prob`` argument).
        gen_error_point_cloud(scene_cpp.scene, scene_cpp.cpu_cache, 
          ply_filename, prob, True)

      with voxel_globe.tools.image_dir('point_cloud') as potree_dir:
        convert_ply_to_potree(ply_filename, potree_dir)

      models.PointCloud.create(name='%s point cloud' % image_collection.name,
        service_id=self.request.id, origin=voxel_world.origin,
        potree_url='%s://%s:%s/%s/point_cloud/%s/cloud.js' % \
          (env['VIP_IMAGE_SERVER_PROTOCOL'], env['VIP_IMAGE_SERVER_HOST'], 
           env['VIP_IMAGE_SERVER_PORT'], env['VIP_IMAGE_SERVER_URL_PATH'], 
           os.path.basename(potree_dir)),
        directory=storage_dir).save()

      # Remove the large per-image intermediate caches.
      voxel_files = lambda x: glob(os.path.join(voxel_world_dir, x))
      cleanup_files = []
      cleanup_files += voxel_files('boxm2_covariance_*.bin')
      cleanup_files += voxel_files('boxm2_point_*.bin')
      cleanup_files += voxel_files('float16_image_*.bin')
      for cleanup_file in cleanup_files:
        os.remove(cleanup_file)
示例#7
0
def tiepoint_registration(self, image_collection_id, history=None):
  """Register an image collection's cameras against surveyed tie points.

  Gathers all non-deleted tie points for the collection (skipping control
  points whose name contains 'error' -- those are reserved for the error
  calculation task), triangulates them with the external
  ``bwm_triangulate_2d_corrs`` tool, computes a similarity transform from
  the triangulated positions to the control points' local (LVCS)
  coordinates, then saves a new geolocated Scene and transformed KRT
  cameras for every image.

  Presumably a bound Celery task: ``self`` supplies ``update_state`` and
  ``request.id`` -- confirm against the task decorator (not visible here).

  Arguments:
    image_collection_id -- primary key of the ImageCollection to register.
    history -- optional history token threaded through ``.history()`` calls
        to pin object versions.
  """
  from PIL import Image
  import numpy as np

  from django.contrib.gis import geos

  import vpgl_adaptor

  from vsi.io.krt import Krt

  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt, save_krt
  from voxel_globe.tools.celery import Popen

  from voxel_globe.tools.xml_dict import load_xml

  self.update_state(state='INITIALIZE', meta={'id':image_collection_id})


  image_collection = models.ImageCollection.objects.get(id=image_collection_id).history(history)

  # control_points maps a ControlPoint objectId to a dict holding:
  #   'tiepoints': {frame_index: [u, v]} -- pixel observations per frame
  #   '3d':        reordered geographic coords (see [1,0,2] swap below)
  #   'lvcs':      local cartesian coords (filled in later)
  #   'id':        correspondence index in the site file (filled in later)
  control_points = {}

  for fr,image in enumerate(image_collection.images.all()):
    image = image.history(history)
    # Tie points may hang off ANY version of this image object, so collect
    # across every row sharing the same objectId.
    tiepoint_ids = set([x for imagen in models.Image.objects.filter(objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(objectId=tiepoint_id, newerVersion=None).history(history)

      #demoware code hack!
      # Control points named '*error*' belong to tiepoint_error_calculation,
      # not to registration -- skip them here.
      if 'error' in tiepoint.geoPoint.name.lower():
        continue

      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints':{}}
        control_points[control_point_id]['tiepoints'][fr] = list(tiepoint.point)
        lla_xyz = models.ControlPoint.objects.get(objectId = control_point_id, newerVersion=None).history(history).point.coords
        # Reorder point.coords via [1,0,2] -- presumably (lon,lat,alt) ->
        # (lat,lon,alt) for create_lvcs below; TODO confirm GEOS axis order.
        control_points[control_point_id]['3d'] = [lla_xyz[x] for x in [1,0,2]]

  #filter only control points with more than 1 tiepoint
  # (a single observation cannot be triangulated)
  control_points = {k:v for k,v in control_points.iteritems() if len(v['tiepoints'].keys()) > 1}

  # LVCS origin = centroid of the control points, in the same (y, x, z)
  # ordering produced by the [1,0,2] swap above.
  origin_yxz = np.mean([v['3d'] for k,v in control_points.iteritems()], axis=0)
  lvcs = vpgl_adaptor.create_lvcs(origin_yxz[0], origin_yxz[1], origin_yxz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_points[control_point]['3d'])

  # images maps frame index -> image objectId, so the re-registered cameras
  # written by compute_transformation can be matched back to their images.
  images = {}

  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    # The site file format requires an image path; a 1x1 placeholder
    # satisfies it since only cameras/correspondences matter here.
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1,1], dtype=np.uint8))
    img.save(dummy_imagename)
    #Thank you stupid site file

    for fr,image in enumerate(image_collection.images.all()):
      (K,R,T,o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId

      # Plain-text KRT: 3x3 K, 3x3 R, then 1x3 T, one file per frame.
      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr), 'w') as fid:
        print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
            K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
        print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
            R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
        print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    # NOTE(review): the stray 've' after </Objects> below looks like a typo,
    # but it is present in both copies of this site-file template in this
    # file, so the downstream parser apparently tolerates it -- confirm
    # before cleaning it up.
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
  <videoSiteDir path="%s">
  </videoSiteDir>
  <videoPath path="%s">
  </videoPath>
  <cameraPath path="%s/*.txt">
  </cameraPath>
  <Objects>
  </Objects>ve
  <Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id]['tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0], tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
  </BWM_VIDEO_SITE>\n''')

    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name, '-out', site_out_name], logger=logger).wait()

    #Read in the result, and load into points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate = {'id':[], 'x':[], 'y':[], 'z':[]}
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate['id'].append(int(correspondence.at['id']))
      points_triangulate['x'].append(float(correspondence['corr_world_point'].at['X']))
      points_triangulate['y'].append(float(correspondence['corr_world_point'].at['Y']))
      points_triangulate['z'].append(float(correspondence['corr_world_point'].at['Z']))

    #Read the points out of the control points structure, but make sure they are 
    #in the same order (check id == point_id
    points_orig = {'x':[], 'y':[], 'z':[]}
    for point_id in points_triangulate['id']:
      point = [v['lvcs'] for k,v in control_points.iteritems() if v['id'] == point_id]
      points_orig['x'].append(point[0][0])
      points_orig['y'].append(point[0][1])
      points_orig['z'].append(point[0][2])
    new_cameras = os.path.join(processing_dir, 'new_cameras')
    os.mkdir(new_cameras)

    #Make transformation
    # Similarity transform mapping triangulated points onto surveyed LVCS
    # points; also rewrites the cameras in processing_dir into new_cameras.
    transform, scale = vpgl_adaptor.compute_transformation(points_triangulate['x'], points_triangulate['y'], points_triangulate['z'],
                                        points_orig['x'],points_orig['y'],points_orig['z'],
                                        processing_dir, new_cameras)

    #calculate the new bounding box
    bbox_min, bbox_max = vpgl_adaptor.compute_transformed_box(list(image_collection.scene.bbox_min), list(image_collection.scene.bbox_max), transform)

    #calculate the new voxel size
    default_voxel_size=geos.Point(*(x*scale for x in image_collection.scene.default_voxel_size))

    # origin_yxz is (y, x, z), so swap back to (x, y, z) for the geos Point.
    scene = models.Scene.create(name=image_collection.scene.name+' tiepoint registered', 
                        service_id=self.request.id,
                        origin=geos.Point(origin_yxz[1], origin_yxz[0], origin_yxz[2]),
                        bbox_min=geos.Point(*bbox_min),
                        bbox_max=geos.Point(*bbox_max),
                        default_voxel_size=default_voxel_size,
                        geolocated=True)
    scene.save()
    image_collection.scene=scene
    image_collection.save()

    # Persist the transformed cameras back onto the (latest) image rows,
    # with the LVCS origin reordered back to (x, y, z).
    for fr, image_id in images.iteritems():
      krt = Krt.load(os.path.join(new_cameras, 'frame_%05d.txt' % fr))
      image = models.Image.objects.get(objectId=image_id, newerVersion=None)
      save_krt(self.request.id, image, krt.k, krt.r, krt.t, [origin_yxz[x] for x in [1,0,2]], srid=4326)
示例#8
0
def tiepoint_error_calculation(self, image_collection_id, scene_id, history=None):
  """Estimate registration accuracy from held-out 'error' tie points.

  Mirror image of ``tiepoint_registration``: it keeps ONLY control points
  whose name contains 'error' (the registration task skips exactly those),
  triangulates their tie points with ``bwm_triangulate_2d_corrs``, and
  compares the triangulated positions against the surveyed control-point
  positions in the scene's local coordinate system.

  Presumably a bound Celery task: ``self`` supplies ``update_state`` --
  confirm against the task decorator (not visible here).

  Arguments:
    image_collection_id -- primary key of the ImageCollection to evaluate.
    scene_id -- Scene whose origin defines the LVCS used for comparison.
    history -- optional history token threaded through ``.history()`` calls.

  Returns:
    dict with 'error' (per-axis RMS residual, [x, y, z]),
    'horizontal_accuracy' and 'vertical_accuracy' (see notes at the end).
  """
  from PIL import Image
  import numpy as np

  import vpgl_adaptor

  from voxel_globe.meta import models
  import voxel_globe.tools
  from voxel_globe.tools.camera import get_krt
  from voxel_globe.tools.celery import Popen

  from voxel_globe.tools.xml_dict import load_xml

  self.update_state(state='INITIALIZE', meta={'id':image_collection_id, 'scene':scene_id})

  image_collection = models.ImageCollection.objects.get(id=image_collection_id).history(history)

  # Same structure as in tiepoint_registration: objectId -> {'tiepoints',
  # '3d', 'lvcs', 'id'}.
  control_points = {}

  for fr,image in enumerate(image_collection.images.all()):
    image = image.history(history)
    # Tie points may hang off ANY version of this image object.
    tiepoint_ids = set([x for imagen in models.Image.objects.filter(objectId=image.objectId) for x in imagen.tiepoint_set.all().values_list('objectId', flat=True)])
    for tiepoint_id in tiepoint_ids:
      tiepoint = models.TiePoint.objects.get(objectId=tiepoint_id, newerVersion=None).history(history)

      #demoware code hack!
      # Inverted filter vs. tiepoint_registration: ONLY '*error*' control
      # points are used here -- these were held out of the registration.
      if not 'error' in tiepoint.geoPoint.name.lower():
        continue

      if not tiepoint.deleted:
        control_point_id = tiepoint.geoPoint.objectId
        if control_point_id not in control_points:
          control_points[control_point_id] = {'tiepoints':{}}
        control_points[control_point_id]['tiepoints'][fr] = list(tiepoint.point)
        lla_xyz = models.ControlPoint.objects.get(objectId = control_point_id, newerVersion=None).history(history).point.coords
        # [1,0,2] reorder -- presumably (lon,lat,alt) -> (lat,lon,alt);
        # TODO confirm GEOS axis order.
        control_points[control_point_id]['3d'] = [lla_xyz[x] for x in [1,0,2]]

  #filter only control points with more than 1 tiepoint
  # (a single observation cannot be triangulated)
  control_points = {k:v for k,v in control_points.iteritems() if len(v['tiepoints'].keys()) > 1}

  # Use the scene's stored origin (x, y, z) for the LVCS, NOT a centroid --
  # the comparison must happen in the same frame the scene was built in.
  origin_xyz = list(models.Scene.objects.get(id=scene_id).origin)
  lvcs = vpgl_adaptor.create_lvcs(origin_xyz[1], origin_xyz[0], origin_xyz[2], 'wgs84')
  for control_point in control_points:
    control_points[control_point]['lvcs'] = vpgl_adaptor.convert_to_local_coordinates2(lvcs, *control_points[control_point]['3d'])

  images = {}

  with voxel_globe.tools.task_dir('visualsfm', cd=True) as processing_dir:
    # 1x1 placeholder image required by the site file format.
    dummy_imagename = os.path.join(processing_dir, 'blank.jpg')
    img = Image.fromarray(np.empty([1,1], dtype=np.uint8))
    img.save(dummy_imagename)
    #Thank you stupid site file

    for fr,image in enumerate(image_collection.images.all()):
      (K,R,T,o) = get_krt(image.history(history), history=history)
      images[fr] = image.objectId

      # Plain-text KRT: 3x3 K, 3x3 R, then 1x3 T, one file per frame.
      with open(os.path.join(processing_dir, 'frame_%05d.txt' % fr), 'w') as fid:
        print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
            K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
        print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
            R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
        print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
    site_in_name = os.path.join(processing_dir, 'site.xml')
    site_out_name = os.path.join(processing_dir, 'site2.xml')
    # NOTE(review): stray 've' after </Objects> below -- present in both
    # copies of this template in this file, so presumably tolerated by the
    # parser; confirm before changing.
    with open(site_in_name, 'w') as fid:
      fid.write('''<BWM_VIDEO_SITE name="Triangulation">
<videoSiteDir path="%s">
</videoSiteDir>
<videoPath path="%s">
</videoPath>
<cameraPath path="%s/*.txt">
</cameraPath>
<Objects>
</Objects>ve
<Correspondences>\n''' % (processing_dir, dummy_imagename, processing_dir))
      for control_point_index, control_point_id in enumerate(control_points):
        fid.write('<Correspondence id="%d">\n' % control_point_index)
        for fr, tie_point in control_points[control_point_id]['tiepoints'].iteritems():
          fid.write('<CE fr="%d" u="%f" v="%f"/>\n' % (fr, tie_point[0], tie_point[1]))
        fid.write('</Correspondence>\n')
        control_points[control_point_id]['id'] = control_point_index
      fid.write('''</Correspondences>
</BWM_VIDEO_SITE>\n''')

    #triangulate the points
    Popen(['bwm_triangulate_2d_corrs', '-site', site_in_name, '-out', site_out_name], logger=logger).wait()

    #Read in the result, and load into points_triangulate structure
    xml = load_xml(site_out_name)
    points_triangulate_id=[]
    points_triangulate=[]
    for correspondence in xml['Correspondences']['Correspondence']:
      points_triangulate_id.append(int(correspondence.at['id']))
      points_triangulate.append((float(correspondence['corr_world_point'].at['X']),
                                 float(correspondence['corr_world_point'].at['Y']),
                                 float(correspondence['corr_world_point'].at['Z'])))

    #Read the points out of the control points structure, but make sure they are 
    #in the same order (check id == point_id
    points_orig = []
    for point_id in points_triangulate_id:
      point = [v['lvcs'] for k,v in control_points.iteritems() if v['id'] == point_id]
      points_orig.append(point[0])  

  points_orig = np.array(points_orig)
  points_triangulate = np.array(points_triangulate)
  # Per-axis RMS residual over the N correspondences: norm over axis 0
  # (the point axis) divided by sqrt(N) -> error = [rms_x, rms_y, rms_z].
  error = np.linalg.norm((points_orig-points_triangulate), axis=0)/(points_orig.shape[0]**0.5)

  result={}
  result['error'] = list(error)
  # 2.4477 and 1.96 look like the NSSDA 95%-confidence circular/linear
  # error factors (CE95 from mean of rms_x, rms_y; LE95 from rms_z) --
  # confirm against the project's accuracy-standard reference.
  result['horizontal_accuracy'] = 2.4477*0.5*(error[0]+error[1])
  result['vertical_accuracy'] = 1.96*error[2]

  return result
示例#9
0
def generate_error_point_cloud(
    self, voxel_world_id, prob=0.5, position_error_override=None, orientation_error_override=None, history=None
):
    """Generate a point cloud with per-point position-error covariances.

    For every image used to build the voxel world, renders a depth map on
    the GPU, casts 3D points with their covariance (combining per-camera
    position error ``cov_c`` and orientation error ``std_dev_angle``),
    accumulates and normalizes them, exports a PLY above the ``prob``
    occupancy threshold, converts it for Potree viewing, and registers the
    result as a ``models.PointCloud``.

    Presumably a bound Celery task: ``self`` supplies ``update_state`` and
    ``request.id`` -- confirm against the task decorator (not visible here).

    Arguments:
      voxel_world_id -- primary key of the VoxelWorld to process.
      prob -- occupancy probability threshold for exported points.
      position_error_override -- if given, overrides every camera's
          'position_error' attribute.
      orientation_error_override -- if given, overrides every camera's
          'orientation_error' attribute.
      history -- optional history token threaded through ``.history()``.
    """
    from glob import glob
    import json

    import numpy as np

    from boxm2_adaptor import (
        load_cpp,
        render_depth,
        cast_3d_point,
        cast_3d_point_pass2,
        write_cache,
        accumulate_3d_point_and_cov,
        normalize_3d_point_and_cov,
    )
    from boxm2_scene_adaptor import boxm2_scene_adaptor
    from vpgl_adaptor import create_perspective_camera_krt, persp2gen, compute_direction_covariance
    from boxm2_mesh_adaptor import gen_error_point_cloud

    from vsi.tools.redirect import StdRedirect, Logger as LoggerWrapper

    import voxel_globe.tools
    import voxel_globe.meta.models as models
    from voxel_globe.tools.camera import get_krt

    # Redirect stdout/stderr of the C++ adaptors into per-task log files.
    with StdRedirect(
        open(os.path.join(voxel_globe.tools.log_dir(), self.request.id) + "_out.log", "w"),
        open(os.path.join(voxel_globe.tools.log_dir(), self.request.id) + "_err.log", "w"),
    ):

        self.update_state(state="SETUP", meta={"pid": os.getpid()})

        voxel_world = models.VoxelWorld.objects.get(id=voxel_world_id)
        # The image collection and scene ids are recovered from the inputs
        # of the service call that originally built this voxel world.
        service_inputs = json.loads(voxel_world.service.inputs)
        image_collection = models.ImageCollection.objects.get(id=service_inputs[0][0])
        images = image_collection.images.all()
        # Value unused below; the lookup validates the recorded scene id
        # (raises DoesNotExist if it is gone).
        scene = models.Scene.objects.get(id=service_inputs[0][1])

        voxel_world_dir = voxel_world.directory

        scene_filename = os.path.join(voxel_world_dir, "scene_color.xml")

        opencl_device = os.environ["VIP_OPENCL_DEVICE"]
        # GPU scene for depth rendering, CPU scene for point/cov casting.
        scene_gpu = boxm2_scene_adaptor(scene_filename, opencl_device)
        scene_cpp = boxm2_scene_adaptor(scene_filename, "cpp")

        type_id_fname = "type_names_list.txt"
        image_id_fname = "image_list.txt"

        # Defaults when a camera carries no explicit error attributes:
        # orientation error in angular units, position error in linear units.
        std_dev_angle_default = 0.1
        cov_c_path = "cov_c.txt"
        cov_c_default = 0.8

        with voxel_globe.tools.task_dir("generate_error_point_cloud", cd=True) as processing_dir:
            for index, image in enumerate(images):
                self.update_state(
                    state="PROCESSING", meta={"stage": "casting", "image": index + 1, "total": len(images)}
                )

                k, r, t, o = get_krt(image.history(history))

                attributes = image.history(history).camera.history(history).attributes

                # BUGFIX: the defaults were swapped -- 'position_error'
                # previously fell back to std_dev_angle_default (0.1) and
                # 'orientation_error' to cov_c_default (0.8), contradicting
                # the constants' names above.
                cov_c = attributes.get("position_error", cov_c_default)
                if position_error_override is not None:
                    cov_c = position_error_override
                std_dev_angle = attributes.get("orientation_error", std_dev_angle_default)
                if orientation_error_override is not None:
                    std_dev_angle = orientation_error_override
                # Isotropic position covariance matrix from the scalar sigma.
                cov_c = np.eye(3) * cov_c ** 2

                np.savetxt(cov_c_path, cov_c)

                perspective_camera = create_perspective_camera_krt(k, r, t)

                self.update_state(
                    state="PROCESSING", meta={"stage": "pre render", "image": index + 1, "total": len(images)}
                )

                # Expected depth + depth variance rendered on the GPU.
                (depth_image, variance_image, _) = render_depth(
                    scene_gpu.scene,
                    scene_gpu.opencl_cache,
                    perspective_camera,
                    image.imageWidth,
                    image.imageHeight,
                    scene_gpu.device,
                )

                self.update_state(
                    state="PROCESSING", meta={"stage": "post_render", "image": index + 1, "total": len(images)}
                )

                cov_v_path = "cov_%06d.txt" % index
                appearance_model = "image"

                self.update_state(
                    state="PROCESSING", meta={"stage": "pre_persp2gen", "image": index + 1, "total": len(images)}
                )

                # Generic (ray-based) camera needed by the casting passes.
                generic_camera = persp2gen(perspective_camera, image.imageWidth, image.imageHeight)

                self.update_state(
                    state="PROCESSING", meta={"stage": "pre_covar", "image": index + 1, "total": len(images)}
                )

                # Per-ray direction covariance from the orientation error.
                compute_direction_covariance(perspective_camera, std_dev_angle, cov_v_path)

                self.update_state(
                    state="PROCESSING", meta={"stage": "pre_cast1", "image": index + 1, "total": len(images)}
                )
                cast_3d_point(
                    scene_cpp.scene,
                    scene_cpp.cpu_cache,
                    perspective_camera,
                    generic_camera,
                    depth_image,
                    variance_image,
                    appearance_model,
                )
                self.update_state(
                    state="PROCESSING", meta={"stage": "pre_cast2", "image": index + 1, "total": len(images)}
                )
                cast_3d_point_pass2(
                    scene_cpp.scene, scene_cpp.cpu_cache, generic_camera, appearance_model, cov_c_path, cov_v_path
                )

                self.update_state(
                    state="PROCESSING", meta={"stage": "pre_accumulate", "image": index + 1, "total": len(images)}
                )

                accumulate_3d_point_and_cov(scene_cpp.scene, scene_cpp.cpu_cache, appearance_model)

                # self.update_state(state='PROCESSING',
                #                  meta={'stage':'pre_write', 'image':index+1,
                #                        'total':len(images)})

                # write_cache(scene_cpp.cpu_cache, 1)

                self.update_state(
                    state="PROCESSING", meta={"stage": "post_write", "image": index + 1, "total": len(images)}
                )

            self.update_state(state="PROCESSING", meta={"stage": "compute error"})

            normalize_3d_point_and_cov(scene_cpp.scene, scene_cpp.cpu_cache)

            # NOTE: 'index' here is the last loop index (progress display
            # only); this would raise NameError if 'images' were empty.
            self.update_state(state="PROCESSING", meta={"stage": "pre_write", "image": index + 1, "total": len(images)})
            write_cache(scene_cpp.cpu_cache, 1)

            self.update_state(state="EXPORTING", meta={"stage": "ply"})

            with voxel_globe.tools.storage_dir("generate_error_point_cloud") as storage_dir:
                ply_filename = os.path.join(storage_dir, "model.ply")
                gen_error_point_cloud(scene_cpp.scene, scene_cpp.cpu_cache, ply_filename, prob)

            with voxel_globe.tools.image_dir("point_cloud") as potree_dir:
                convert_ply_to_potree(ply_filename, potree_dir)

            models.PointCloud.create(
                name="%s point cloud" % image_collection.name,
                service_id=self.request.id,
                origin=voxel_world.origin,
                potree_url="%s://%s:%s/%s/point_cloud/%s/cloud.js"
                % (
                    env["VIP_IMAGE_SERVER_PROTOCOL"],
                    env["VIP_IMAGE_SERVER_HOST"],
                    env["VIP_IMAGE_SERVER_PORT"],
                    env["VIP_IMAGE_SERVER_URL_PATH"],
                    os.path.basename(potree_dir),
                ),
                filename=ply_filename,
            ).save()

            # Drop the large intermediate binaries left in the voxel world
            # directory by the casting passes.
            voxel_files = lambda x: glob(os.path.join(voxel_world_dir, x))
            cleanup_files = []
            cleanup_files += voxel_files("boxm2_covariance_*.bin")
            cleanup_files += voxel_files("boxm2_point_*.bin")
            cleanup_files += voxel_files("float16_image_*.bin")
            for cleanup_file in cleanup_files:
                os.remove(cleanup_file)
示例#10
0
def run_build_voxel_model_bp(self,
                             image_set_id,
                             camera_set_id,
                             scene_id,
                             bbox,
                             skip_frames,
                             cleanup=True):
    """Build a voxel model using belief-propagation (BP) style updates.

    Writes KRT camera files and locally-linked copies of every image in the
    image set, creates a boxm2 scene from ``bbox``, runs ``refine_cnt``
    rounds of BP updates (see ``update_bp``) with octree refinement between
    rounds, repaints the model with a color appearance model, and archives
    the result as a new ``models.VoxelWorld``.

    Presumably a bound Celery task: ``self`` supplies ``update_state`` and
    ``request.id`` -- confirm against the task decorator (not visible here).

    Arguments:
      image_set_id -- ImageSet primary key.
      camera_set_id -- camera set id passed to ``get_krt`` per image.
      scene_id -- Scene primary key supplying the LVCS origin.
      bbox -- dict with 'voxel_size', 'x_min'..'z_max' (values convertible
          to float).
      skip_frames -- unused in this variant (kept for interface parity
          with run_build_voxel_model).
      cleanup -- unused in this variant (kept for interface parity).

    Returns:
      dict with the processed image set's name.
    """
    import random
    import glob
    import math

    import numpy as np

    from vsi.tools.redirect import StdRedirect
    from voxel_globe.meta import models
    from voxel_globe.tools.camera import get_krt
    import voxel_globe.tools

    from boxm2_scene_adaptor import boxm2_scene_adaptor

    import brl_init
    from vil_adaptor_boxm2_batch import load_image
    import vpgl_adaptor_boxm2_batch as vpgl

    from vsi.vxl.create_scene_xml import create_scene_xml

    from vsi.tools.dir_util import copytree
    from vsi.tools.file_util import lncp

    def rectint(recta, rectb):
        """Intersect two axis-aligned rects given as [min_x, min_y, max_x,
        max_y]; return (intersection_rect, area). Empty -> ([0,0,0,0], 0)."""
        lx = max(recta[0], rectb[0])
        rx = min(recta[2], rectb[2])
        by = max(recta[1], rectb[1])
        ty = min(recta[3], rectb[3])

        if lx > rx or by > ty:
            return [0, 0, 0, 0], 0
        else:
            return [lx, by, rx, ty], (rx - lx) * (ty - by)

    def generate_subsetim(scene, camfiles, ni, nj):
        """Randomly pick a subset of mutually-independent viewpoints.

        A camera is rejected if its principal axis is within minIndepAngle
        of an already-selected camera AND their ground footprints overlap
        more than minDepOverlap. For each kept camera, reference images are
        cameras whose viewing angle differs by minRefAngle..maxRefAngle and
        that are themselves mutually separated by minRefIndepAngle; a
        camera with no references is dropped.

        Returns (subsetIdx, refIndices): indices into the cameras that
        intersect the scene footprint, and a parallel list of per-camera
        reference index lists.
        """
        subsetIdx = []
        refIndices = []
        minDepOverlap = 0.25
        minRefOverlap = 0.5
        minIndepAngle = 5.0
        minRefAngle = 5.0
        maxRefAngle = 15.0
        minRefIndepAngle = 5.0
        # Angle comparisons below are done on cosines of principal axes
        # (larger cosine == smaller angle).
        cosMinIndepAngle = math.cos(minIndepAngle * math.pi / 180.0)
        cosMinRefAngle = math.cos(minRefAngle * math.pi / 180.0)
        cosMaxRefAngle = math.cos(maxRefAngle * math.pi / 180.0)
        cosMinRefIndepAngle = math.cos(minRefIndepAngle * math.pi / 180.0)
        # Local bbox of the boxm2 scene adaptor -- shadows the task's
        # 'bbox' dict argument (different object, different format).
        bbox = scene.bbox
        # Scene ground rectangle [min_x, min_y, max_x, max_y].
        grect = [
            scene.bbox[0][0], scene.bbox[0][1], scene.bbox[1][0],
            scene.bbox[1][1]
        ]
        worldoverlaps = []   # NOTE(review): filled below but never read
        camrects = []
        cams = []
        princAxis = []
        for camfile in camfiles:
            pcam = vpgl.load_perspective_camera(camfile)
            # Principal axis = backprojected ray through the image center.
            prx, pry, prz = vpgl.get_backprojected_ray(pcam, ni / 2, nj / 2)
            princAxis.append([prx, pry, prz])
            # Homography from image plane to the z = bbox-floor world plane,
            # used to project the image corners onto the ground.
            Hmat = vpgl.compute_camera_to_world_homography(
                pcam, [0, 0, 1, -bbox[0][2]])
            H = np.array(Hmat).reshape([3, 3])
            ps = np.dot(
                H,
                np.transpose([[0, 0, 1], [ni, 0, 1], [ni, nj, 1], [0, nj, 1]]))
            xs = ps[0, :] / ps[2, :]
            ys = ps[1, :] / ps[2, :]
            # Axis-aligned footprint of the projected image corners.
            rect = [min(xs), min(ys), max(xs), max(ys)]
            area = (rect[2] - rect[0]) * (rect[3] - rect[1])
            crect, carea = rectint(rect, grect)
            #print crect,carea
            # Keep only cameras whose footprint intersects the scene.
            if (carea > 0):
                cams.append(pcam)
                camrects.append(crect)
                worldoverlaps.append(carea / area)

        # Visit cameras in random order (linear probe past used ones).
        usedcams = [False] * len(cams)
        for i in range(0, len(cams)):
            randidx = random.randint(0, len(cams) - 1)
            while usedcams[randidx]:
                randidx = (randidx + 1) % len(cams)
            usedcams[randidx] = True
            dep = False
            for c2 in range(0, len(subsetIdx)):
                cosAngle = np.dot(princAxis[randidx], princAxis[subsetIdx[c2]])
                if cosAngle > cosMinIndepAngle:
                    # Similar viewing direction: dependent if footprints
                    # overlap too much.
                    rectc2 = camrects[subsetIdx[c2]]
                    overlap, oarea = rectint(camrects[randidx], rectc2)
                    tarea = (rectc2[2] - rectc2[0]) * (rectc2[3] - rectc2[1])
                    if (oarea / tarea > minDepOverlap):
                        dep = True
                        break
            if dep:
                continue
            theseRefIndices = []
            for c3 in range(0, len(cams)):
                #Check angle disparity
                # Reject references outside the [minRefAngle, maxRefAngle]
                # band (cos > cosMin means too close; cos < cosMax too far).
                cosAngle2 = np.dot(princAxis[randidx], princAxis[c3])
                if (cosAngle2 > cosMinRefAngle or cosAngle2 < cosMaxRefAngle):
                    continue
                # Check that a similar viewpoint isn't already used for reference
                refDep = False
                for c4 in range(0, len(theseRefIndices)):
                    #Check angle disparity
                    cosAngle3 = np.dot(princAxis[theseRefIndices[c4]],
                                       princAxis[c3])
                    if (cosAngle3 > cosMinRefIndepAngle):
                        refDep = True
                        break
                    #If similar viewpoint don't add
                if (refDep):
                    continue
                theseRefIndices.append(c3)
                #If at least one reference image save this viewpoint
            if len(theseRefIndices) > 0:
                subsetIdx.append(randidx)
                refIndices.append(theseRefIndices)
        return subsetIdx, refIndices

    def update_bp(scene,
                  images,
                  cameras,
                  do_update_image=True,
                  do_update_hmap=False):
        """One belief-propagation sweep over a random camera subset.

        Resets the scene to a uniform occupancy prior, then for each
        selected frame builds an appearance model from its reference
        images, recomputes that frame's image factor (and optionally a
        height-map factor every other frame), and fuses the factors with
        the given weights. Writes the cache at the end.
        """
        _, ni, nj = load_image(images[0])
        frames, refimages = generate_subsetim(scene, cameras, ni, nj)
        # Wipe any previous model data before re-initializing.
        for file_name in glob.glob(os.path.join(scene.model_dir,
                                                'boxm2_*.bin')):
            os.remove(file_name)
        scene.init_uniform_prob()

        sradius = 16
        # Factor identifiers and fusion weights: 'if' = image factor,
        # 'hf' = height-map factor.
        idents = []
        weights = []
        if do_update_image:
            idents.append("if")
            weights.append(1.0)
        if do_update_hmap:
            idents.append("hf")
            weights.append(2.0)

        for idx, i in enumerate(frames):
            if do_update_image:
                print "Iteration ", idx, "Image ", images[i]
                ####load image and camera
                viewid = os.path.splitext(os.path.basename(images[i]))[0]
                #### forming an app model using the neighbor images
                for lindex in refimages[idx]:
                    lcam = vpgl.load_perspective_camera(cameras[lindex])
                    limg, ni, nj = load_image(images[lindex])
                    scene.update(lcam, limg, False, True, None, "gpu0", 0.05,
                                 viewid)

                scene.update_if(False, viewid)  # subtracting the image factor
                scene.fuse_factors(idents, weights)
                pcam = vpgl.load_perspective_camera(cameras[i])
                img, ni, nj = load_image(images[i])
                scene.compute_pre_post(pcam, img, viewid, 100000, 100000)
                # computing the new image factor
                scene.update_if(True, viewid)  # adding the image factor
                scene.fuse_factors(idents, weights)

            if do_update_hmap and idx % 2 == 0:
                scene.update_hf(False)  # subtracting the height-map factor
                scene.fuse_factors(idents, weights)
                zimg, zvar, ximg, yimg, probimg = scene.render_height_map()
                #save_image(zimg, "./zimg.tif")
                scene.compute_hmapf(zimg, zvar, ximg, yimg,
                                    sradius)  # computing the height-map factor
                scene.update_hf(True)  # adding the height-map factor
                scene.fuse_factors(idents, weights)

        scene.write_cache()

    def refine(scene):
        """Refine the octree at threshold 0.3, dropping stale alpha/aux
        binaries ([a-b]*.bin) so they are regenerated, then write cache."""
        scene.refine(0.3)
        for filename in glob.glob(os.path.join(scene.model_dir, '[a-b]*.bin')):
            os.remove(filename)
        scene.write_cache()

    # Redirect stdout/stderr of the C++ adaptors into per-task log files.
    with StdRedirect(
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_out.log', 'w'),
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_err.log', 'w')):

        openclDevice = os.environ['VIP_OPENCL_DEVICE']
        opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
        if opencl_memory:
            opencl_memory = int(opencl_memory)

        scene = models.Scene.objects.get(id=scene_id)
        imageSet = models.ImageSet.objects.get(id=image_set_id)
        imageList = imageSet.images.all()

        with voxel_globe.tools.task_dir('voxel_world') as processing_dir:
            logger.warning(bbox)

            create_scene_xml(openclDevice,
                             0,
                             float(bbox['voxel_size']),
                             lvcs1=(float(bbox['x_min']), float(bbox['y_min']),
                                    float(bbox['z_min'])),
                             lvcs2=(float(bbox['x_max']), float(bbox['y_max']),
                                    float(bbox['z_max'])),
                             origin=scene.origin,
                             model_dir='.',
                             number_bins=1,
                             output_file=open(
                                 os.path.join(processing_dir, 'scene.xml'),
                                 'w'),
                             n_bytes_gpu=opencl_memory)

            counter = 1

            imageNames = []
            cameraNames = []

            os.mkdir(os.path.join(processing_dir, 'local'))

            #Prepping
            self.update_state(state='INITIALIZE',
                              meta={
                                  'image_set_name': imageSet.name,
                                  'stage': 'camera fetch'
                              })
            for image in imageList:
                (K, R, T, o) = get_krt(image, camera_set_id)

                krtName = os.path.join(processing_dir, 'local',
                                       'frame_%05d.krt' % counter)

                # Plain-text KRT: 3x3 K, 3x3 R, then 1x3 T.
                with open(krtName, 'w') as fid:
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        K[0, 0], K[0, 1], K[0, 2], K[1, 0], K[1, 1], K[1, 2],
                        K[2, 0], K[2, 1], K[2, 2])
                    print >> fid, (("%0.18f " * 3 + "\n") * 3) % (
                        R[0, 0], R[0, 1], R[0, 2], R[1, 0], R[1, 1], R[1, 2],
                        R[2, 0], R[2, 1], R[2, 2])

                    print >> fid, ("%0.18f " * 3 + "\n") % (T[0, 0], T[1, 0],
                                                            T[2, 0])

                imageName = image.filename_path
                extension = os.path.splitext(imageName)[1]
                localName = os.path.join(processing_dir, 'local',
                                         'frame_%05d%s' % (counter, extension))
                # Hard-link (or copy) the image into the working dir so the
                # frame numbering matches the camera files.
                lncp(imageName, localName)

                counter += 1

                imageNames.append(localName)
                cameraNames.append(krtName)

            variance = 0.06

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene.xml"), openclDevice)
            # loaded_imgs = []
            # loaded_cams = []

            # for i in range(0, len(imageNames), skip_frames):
            #   logger.debug("i: %d img name: %s cam name: %s", i, imageNames[i],
            #                cameraNames[i])
            #   self.update_state(state='PRELOADING', meta={'image_set_name': imageSet.name,
            #                                               'stage':'image load',
            #                                               'i':i,
            #                                               'total':len(imageNames)})
            #   img, ni, nj = load_image(imageNames[i])
            #   loaded_imgs.append(img)
            #   pcam = load_perspective_camera(cameraNames[i])
            #   loaded_cams.append(pcam)

            refine_cnt = 2

            # Alternate BP update sweeps with octree refinement (refinement
            # is skipped after the final sweep).
            for rfk in range(0, refine_cnt, 1):

                self.update_state(state='PROCESSING',
                                  meta={
                                      'image_set_name': imageSet.name,
                                      'stage': 'update 1'
                                  })
                update_bp(vxl_scene, imageNames, cameraNames)
                # self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
                #     'stage':'update 2'})
                # update_bp(vxl_scene, imageNames, cameraNames, True, True)
                # self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
                #     'stage':'update 3'})
                # update_bp(vxl_scene, imageNames, cameraNames, True, True)

                if rfk < refine_cnt - 1:
                    self.update_state(state='PROCESSING',
                                      meta={
                                          'image_set_name': imageSet.name,
                                          'stage': 'refine',
                                          'i': rfk + 1,
                                          'total': refine_cnt
                                      })
                    refine(vxl_scene)

            #Update color appearance
            # Clone scene.xml with the grey appearance model swapped for the
            # RGB one, then repaint the model from every image.

            with open(os.path.join(processing_dir, "scene_color.xml"),
                      'w') as fid:
                lines = open(os.path.join(processing_dir, "scene.xml"),
                             'r').readlines()
                lines = [
                    line.replace('boxm2_mog3_grey', 'boxm2_gauss_rgb').replace(
                        'boxm2_num_obs', 'boxm2_num_obs_single')
                    for line in lines
                ]
                fid.writelines(lines)

            vxl_scene = boxm2_scene_adaptor(
                os.path.join(processing_dir, "scene_color.xml"), openclDevice)

            for idx, (image_name,
                      camera_name) in enumerate(zip(imageNames, cameraNames)):
                self.update_state(state='PROCESSING',
                                  meta={
                                      'image_set_name': imageSet.name,
                                      'stage': 'color_update',
                                      'i': idx + 1,
                                      'total': len(imageNames),
                                      'images': len(imageNames)
                                  })
                img, _, _ = load_image(image_name)
                pcam = vpgl.load_perspective_camera(camera_name)
                logger.debug("color_paint idx: %d", idx)
                vxl_scene.update(pcam,
                                 img,
                                 False,
                                 False,
                                 None,
                                 openclDevice,
                                 tnear=1000.0,
                                 tfar=100000.0)

            vxl_scene.write_cache()

            # Archive everything except the 'local' image links into
            # permanent storage and register the VoxelWorld row.
            with voxel_globe.tools.storage_dir(
                    'voxel_world') as voxel_world_dir:
                copytree(processing_dir,
                         voxel_world_dir,
                         ignore=lambda x, y: ['local'])
                models.VoxelWorld(name='%s world (%s)' %
                                  (imageSet.name, self.request.id),
                                  origin=scene.origin,
                                  directory=voxel_world_dir,
                                  service_id=self.request.id).save()

        return {"image_set_name": imageSet.name}
示例#11
0
def run_build_voxel_model(self, image_set_id, camera_set_id, scene_id, bbox, 
                          skip_frames, cleanup=True):
  """Celery task: build a boxm2 voxel world from an image set.

  Stages per-frame KRT cameras and image links under ``local/``, writes a
  ``scene.xml`` for the requested LVCS bounding box, runs ``refine_cnt``
  rounds of appearance update (every ``skip_frames``-th image, shuffled)
  interleaved with octree refinement, then repaints the refined scene with
  a color (gauss_rgb) appearance model and archives the result as a
  ``models.VoxelWorld`` row.

  :param image_set_id: pk of the ``models.ImageSet`` to process
  :param camera_set_id: camera set id handed to ``get_krt`` for each image
  :param scene_id: pk of the ``models.Scene`` supplying the world origin
  :param bbox: dict with ``voxel_size`` and ``x/y/z_min``/``x/y/z_max``
               keys (values converted to float here)
  :param skip_frames: stride used when selecting images for the update loop
  :param cleanup: unused in this body (kept for interface compatibility)
  :return: ``{"image_set_name": <name>}``
  """

  import random

  from vsi.tools.redirect import StdRedirect
  from voxel_globe.meta import models
  from voxel_globe.tools.camera import get_krt
  import voxel_globe.tools

  from boxm2_scene_adaptor import boxm2_scene_adaptor

  import brl_init  # name unused; presumably imported for side effects
  from vil_adaptor_boxm2_batch import load_image
  from vpgl_adaptor_boxm2_batch import load_perspective_camera

  from vsi.vxl.create_scene_xml import create_scene_xml

  from vsi.tools.dir_util import copytree
  from vsi.tools.file_util import lncp

  # Capture this task's stdout/stderr into per-task log files
  with StdRedirect(open(os.path.join(voxel_globe.tools.log_dir(), 
                                     self.request.id)+'_out.log', 'w'),
                   open(os.path.join(voxel_globe.tools.log_dir(), 
                                     self.request.id)+'_err.log', 'w')):

    openclDevice = os.environ['VIP_OPENCL_DEVICE']
    opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
    if opencl_memory:
      opencl_memory = int(opencl_memory)

    scene = models.Scene.objects.get(id=scene_id)

    imageSet = models.ImageSet.objects.get(\
        id=image_set_id)
    imageList = imageSet.images.all()

    with voxel_globe.tools.task_dir('voxel_world') as processing_dir:

      logger.warning(bbox)

      # Describe the voxel grid (LVCS bounds, voxel size) in scene.xml
      create_scene_xml(openclDevice, 3, float(bbox['voxel_size']), 
          lvcs1=(float(bbox['x_min']), float(bbox['y_min']), 
                 float(bbox['z_min'])), 
          lvcs2=(float(bbox['x_max']), float(bbox['y_max']), 
                 float(bbox['z_max'])),
          origin=scene.origin, model_dir='.', number_bins=1,
          output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
          n_bytes_gpu=opencl_memory)

      counter = 1

      imageNames = []
      cameraNames = []

      os.mkdir(os.path.join(processing_dir, 'local'))
      
      #Prepping
      # Stage each image: dump its K, R, T matrices to a frame_NNNNN.krt
      # text file and hard-link/copy the image into local/.
      for image in imageList:
        self.update_state(state='INITIALIZE', meta={'image_set_name': imageSet.name,
                                                    'stage':'image fetch', 
                                                    'i':counter, 
                                                    'total':len(imageList)})
        (K,R,T,o) = get_krt(image, camera_set_id)
        
        krtName = os.path.join(processing_dir, 'local', 'frame_%05d.krt' % counter)
        
        with open(krtName, 'w') as fid:
          # Python 2 print-chevron: 3x3 K and R, then T, 18 decimal places
          print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
              K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2])
          print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
              R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2])
    
          print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0])
        
        imageName = image.filename_path
        extension = os.path.splitext(imageName)[1]
        localName = os.path.join(processing_dir, 'local', 
                                 'frame_%05d%s' % (counter, extension))
        lncp(imageName, localName)
        
        counter += 1
      
        imageNames.append(localName)
        cameraNames.append(krtName)
        
      variance = 0.06
      
      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, "scene.xml"),
                                  openclDevice)
   
      # Preload every skip_frames-th image/camera pair into memory
      loaded_imgs = []
      loaded_cams = []
    
      for i in range(0, len(imageNames), skip_frames):
        logger.debug("i: %d img name: %s cam name: %s", i, imageNames[i], 
                     cameraNames[i])
        self.update_state(state='PRELOADING', meta={'image_set_name': imageSet.name,
                                                    'stage':'image load', 
                                                    'i':i, 
                                                    'total':len(imageNames)})
        img, ni, nj = load_image(imageNames[i])
        loaded_imgs.append(img)
        pcam = load_perspective_camera(cameraNames[i])
        loaded_cams.append(pcam)
    
      refine_cnt = 5

      # Alternate full update sweeps (random image order) with octree
      # refinement; refinement is skipped after the final sweep.
      for rfk in range(0, refine_cnt, 1):
        pair = zip(loaded_imgs, loaded_cams)
        random.shuffle(pair)
        for idx, (img, cam) in enumerate(pair):
          self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
              'stage':'update', 
              'i':rfk+1, 'total':refine_cnt, 'image':idx+1, 
              'images':len(loaded_imgs)})
          logger.debug("refine_cnt: %d, idx: %d", rfk, idx)
          vxl_scene.update(cam,img,True,True,None,openclDevice,variance,
                       tnear = 1000.0, tfar = 100000.0)
    
        logger.debug("writing cache: %d", rfk)
        vxl_scene.write_cache()
        logger.debug("wrote cache: %d", rfk)
        
        if rfk < refine_cnt-1:
          self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
                                                      'stage':'refine', 
                                                      'i':rfk, 
                                                      'total':refine_cnt})
          logger.debug("refining %d...", rfk)
          vxl_scene.refine(0.3, openclDevice)
          vxl_scene.write_cache()


      # Clone scene.xml with a color appearance model (gauss_rgb).
      # NOTE(review): the source scene.xml handle opened inline is never
      # explicitly closed (relies on GC).
      with open(os.path.join(processing_dir, "scene_color.xml"), 'w') as fid:
        lines = open(os.path.join(processing_dir, "scene.xml"), 
                                  'r').readlines()
        lines = [line.replace('boxm2_mog3_grey', 
                              'boxm2_gauss_rgb').replace(
                              'boxm2_num_obs',
                              'boxm2_num_obs_single') for line in lines]
        fid.writelines(lines)

      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, 
                                                   "scene_color.xml"),
                                      openclDevice)

      # Color re-paint pass over the same preloaded pairs.
      # NOTE(review): `pair` and `rfk` leak out of the refine loop above;
      # the progress meta still reports refine-stage counters here.
      for idx, (img, cam) in enumerate(pair):
        self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
                                                    'stage':'color_update', 
            'i':rfk+1, 'total':refine_cnt, 'image':idx+1, 
            'images':len(loaded_imgs)})
        logger.debug("color_paint idx: %d", idx)
        vxl_scene.update(cam,img,False,False,None,openclDevice,
                         tnear = 1000.0, tfar = 100000.0)

      vxl_scene.write_cache()

      # Persist the finished world (sans staged inputs) and register it
      with voxel_globe.tools.storage_dir('voxel_world') as voxel_world_dir:
        copytree(processing_dir, voxel_world_dir, ignore=lambda x,y:['local'])
        models.VoxelWorld(
            name='%s world (%s)' % (imageSet.name, self.request.id),
            origin=scene.origin,
            directory=voxel_world_dir,
            service_id=self.request.id).save()

    return {"image_set_name" : imageSet.name}
示例#12
0
def run_build_voxel_model_bp(self, image_set_id, camera_set_id, scene_id, bbox, 
                             skip_frames, cleanup=True):
  """Celery task: build a boxm2 voxel world using belief-propagation updates.

  Variant of ``run_build_voxel_model`` that replaces the plain update/refine
  loop with BP-style factor updates (``update_bp`` below) over an
  automatically chosen camera subset, then repaints with a color model and
  stores the result as a ``models.VoxelWorld``.

  :param image_set_id: pk of the ``models.ImageSet`` to process
  :param camera_set_id: camera set id handed to ``get_krt`` for each image
  :param scene_id: pk of the ``models.Scene`` supplying the world origin
  :param bbox: dict with ``voxel_size`` and ``x/y/z_min``/``x/y/z_max`` keys
  :param skip_frames: accepted for interface parity; the preload loop that
      used it is commented out in this variant
  :param cleanup: unused in this body (kept for interface compatibility)
  :return: ``{"image_set_name": <name>}``
  """
  import random
  import glob
  import math

  import numpy as np

  from vsi.tools.redirect import StdRedirect
  from voxel_globe.meta import models
  from voxel_globe.tools.camera import get_krt
  import voxel_globe.tools

  from boxm2_scene_adaptor import boxm2_scene_adaptor

  import brl_init  # name unused; presumably imported for side effects
  from vil_adaptor_boxm2_batch import load_image
  import vpgl_adaptor_boxm2_batch as vpgl

  from vsi.vxl.create_scene_xml import create_scene_xml

  from vsi.tools.dir_util import copytree
  from vsi.tools.file_util import lncp

  def rectint(recta, rectb):
    """Intersect two axis-aligned rectangles given as [xmin, ymin, xmax, ymax].

    Returns ``(rect, area)``; when the rectangles do not overlap at all the
    result is ``([0, 0, 0, 0], 0)``.  A degenerate (edge/corner touching)
    intersection returns the touching rect with area 0.
    """
    left = max(recta[0], rectb[0])
    bottom = max(recta[1], rectb[1])
    right = min(recta[2], rectb[2])
    top = min(recta[3], rectb[3])

    if right < left or top < bottom:
      return [0, 0, 0, 0], 0
    return [left, bottom, right, top], (right - left) * (top - bottom)

  def generate_subsetim(scene,camfiles,ni,nj):
    """Choose a subset of well-separated cameras plus reference views for BP.

    A camera is kept only if its image footprint (homography-projected onto
    the scene's base plane) overlaps the scene bounding box.  From the kept
    cameras, viewpoints are drawn in random order and accepted when they are
    angularly independent of already-accepted overlapping views, and have at
    least one reference view inside the [minRefAngle, maxRefAngle] band.

    :param scene: boxm2 scene adaptor; only ``scene.bbox`` is read
    :param camfiles: list of .krt camera file paths
    :param ni: image width in pixels
    :param nj: image height in pixels
    :return: ``(subsetIdx, refIndices)`` parallel lists of accepted camera
        indices and their reference-view index lists.
        NOTE(review): indices are positions in the *filtered* ``cams`` list,
        while ``princAxis`` is built for every entry of ``camfiles`` — if any
        camera is rejected by the footprint test, the index spaces diverge
        (and callers index the original file lists).  Confirm all cameras
        always pass the overlap test.
    """
    subsetIdx = []
    refIndices = []
    minDepOverlap = 0.25
    minRefOverlap = 0.5  # NOTE(review): unused
    minIndepAngle = 5.0
    minRefAngle = 5.0
    maxRefAngle = 15.0
    minRefIndepAngle = 5.0
    # Angle thresholds are compared in cosine space below
    cosMinIndepAngle = math.cos( minIndepAngle*math.pi/180.0 );
    cosMinRefAngle = math.cos( minRefAngle*math.pi/180.0 );
    cosMaxRefAngle = math.cos( maxRefAngle*math.pi/180.0 );
    cosMinRefIndepAngle = math.cos( minRefIndepAngle*math.pi/180.0 );
    bbox =  scene.bbox
    grect=[scene.bbox[0][0],scene.bbox[0][1],scene.bbox[1][0],scene.bbox[1][1]]
    worldoverlaps = []
    camrects = []
    cams = []
    princAxis = [] 
    # Project each camera's image corners onto the z = bbox-min plane and
    # keep cameras whose footprint intersects the scene rectangle.
    for camfile in camfiles:
      pcam = vpgl.load_perspective_camera(camfile)
      # ni/2, nj/2: Python 2 integer division — image center pixel
      prx,pry,prz=vpgl.get_backprojected_ray(pcam,ni/2,nj/2)
      princAxis.append([prx,pry,prz])
      Hmat = vpgl.compute_camera_to_world_homography(pcam,[0,0,1,-bbox[0][2]])
      H = np.array(Hmat).reshape([3,3])
      ps =  np.dot(H,np.transpose([[0,0,1],
                     [ni,0,1],
                     [ni,nj,1],
                     [0,nj,1]]))
      xs =  ps[0,:]/ps[2,:]
      ys =  ps[1,:]/ps[2,:]
      rect = [min(xs),min(ys),max(xs),max(ys)]
      area = (rect[2]-rect[0])*(rect[3]-rect[1])
      crect,carea = rectint(rect,grect)
      #print crect,carea
      if ( carea > 0 ):
        cams.append(pcam)
        camrects.append(crect)
        worldoverlaps.append(carea/area)

    # Visit all kept cameras exactly once in random order (linear probing
    # from a random start over the usedcams flags).
    usedcams = [False]*len(cams)
    for i in range(0,len(cams)):
      randidx = random.randint(0,len(cams)-1)
      while usedcams[randidx]:
        randidx = (randidx+1)%len(cams)
      usedcams[randidx]= True
      dep = False
      # Reject this viewpoint if it is both angularly close to and
      # sufficiently overlapping an already-accepted viewpoint.
      for c2 in range(0,len(subsetIdx)):
        cosAngle = np.dot(princAxis[randidx], princAxis[subsetIdx[c2]] )
        if  cosAngle > cosMinIndepAngle :
          rectc2 = camrects[subsetIdx[c2]]
          overlap,oarea = rectint(camrects[randidx] , rectc2)
          tarea = (rectc2[2]-rectc2[0])*(rectc2[3]-rectc2[1])
          if( oarea/tarea > minDepOverlap ):
            dep = True
            break
      if dep:
        continue
      theseRefIndices= []
      # Collect reference views in the admissible angular band
      for c3 in range(0,len(cams)):
        #Check angle disparity
        cosAngle2 = np.dot(princAxis[randidx],princAxis[c3] );
        if( cosAngle2 > cosMinRefAngle or cosAngle2 < cosMaxRefAngle ):
          continue
        # Check that a similar viewpoint isn't already used for reference
        refDep = False
        for c4 in range(0,len(theseRefIndices)):
          #Check angle disparity
          cosAngle3 = np.dot(princAxis[theseRefIndices[c4]],princAxis[c3] );
          if( cosAngle3 > cosMinRefIndepAngle ):
            refDep = True
            break
          #If similar viewpoint don't add
        if( refDep ):
          continue
        theseRefIndices.append(c3)
            #If at least one reference image save this viewpoint
      if len(theseRefIndices) > 0 :
        subsetIdx.append( randidx );
        refIndices.append( theseRefIndices );
    return subsetIdx, refIndices

  def update_bp(scene, images, cameras, do_update_image=True, do_update_hmap=False):
    """Run one belief-propagation sweep over an auto-selected camera subset.

    Deletes the cached ``boxm2_*.bin`` data, resets the world to a uniform
    occupancy prior, then for each selected frame builds a per-view
    appearance model from that frame's reference views and swaps the
    frame's image factor (and, optionally, a height-map factor every other
    frame) in and out of the fused field.  Writes the cache at the end.

    :param scene: boxm2 scene adaptor, mutated in place
    :param images: list of image file paths
    :param cameras: list of .krt camera file paths, parallel to ``images``
    :param do_update_image: include the image factor ("if", weight 1.0)
    :param do_update_hmap: include the height-map factor ("hf", weight 2.0)
    """
    _, ni, nj = load_image (images[0])
    frames,refimages = generate_subsetim(scene,cameras,ni,nj)
    # Start from scratch: drop cached data blocks, reset occupancy
    for file_name in glob.glob(os.path.join(scene.model_dir, 'boxm2_*.bin')):
      os.remove(file_name)
    scene.init_uniform_prob()
    
    sradius = 16  # smoothing radius passed to compute_hmapf
    idents = []
    weights = []
    if do_update_image:
      idents.append("if")
      weights.append(1.0)
    if do_update_hmap:
      idents.append("hf")
      weights.append(2.0)

    for idx, i in enumerate(frames):
      if do_update_image:
        print "Iteration ",idx,  "Image " , images[i];
        ####load image and camera
        viewid = os.path.splitext(os.path.basename(images[i]))[0]
        #### forming an app model using the neighbor images
        # NOTE(review): device is hard-coded to "gpu0" here rather than the
        # task's openclDevice — confirm this is intentional
        for lindex in refimages[idx]:
          lcam        = vpgl.load_perspective_camera(cameras[lindex]); 
          limg, ni, nj = load_image (images[lindex]);
          scene.update(lcam, limg,False, True,None ,"gpu0",0.05,viewid)

        scene.update_if(False,viewid)       # subtracting the image factor 
        scene.fuse_factors(idents,weights)  
        pcam        = vpgl.load_perspective_camera(cameras[i]); 
        img, ni, nj = load_image (images[i]);
        scene.compute_pre_post(pcam, img,viewid,100000,100000); # computing the new image factor 
        scene.update_if(True,viewid)       # adding the image factor 
        scene.fuse_factors(idents,weights)

      if do_update_hmap and idx % 2 == 0:     
        scene.update_hf(False)              # subtracting the height-map factor 
        scene.fuse_factors(idents,weights)
        zimg,zvar,ximg,yimg,probimg = scene.render_height_map()
        #save_image(zimg, "./zimg.tif")
        scene.compute_hmapf(zimg,zvar,ximg,yimg,sradius) # computing the height-map factor
        scene.update_hf(True)                            # adding the height-map factor
        scene.fuse_factors(idents,weights)

    scene.write_cache()

  def refine(scene):
    """Refine the scene's octree at threshold 0.3, delete cached binaries
    matching ``[a-b]*.bin`` from the model directory, and write the cache."""
    scene.refine(0.3)
    stale_files = glob.glob(os.path.join(scene.model_dir, '[a-b]*.bin'))
    for stale_file in stale_files:
      os.remove(stale_file)
    scene.write_cache()

  # Capture this task's stdout/stderr into per-task log files
  with StdRedirect(open(os.path.join(voxel_globe.tools.log_dir(), 
                                     self.request.id)+'_out.log', 'w'),
                   open(os.path.join(voxel_globe.tools.log_dir(), 
                                     self.request.id)+'_err.log', 'w')):

    openclDevice = os.environ['VIP_OPENCL_DEVICE']
    opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
    if opencl_memory:
      opencl_memory = int(opencl_memory)

    scene = models.Scene.objects.get(id=scene_id)
    imageSet = models.ImageSet.objects.get(id=image_set_id)
    imageList = imageSet.images.all()

    with voxel_globe.tools.task_dir('voxel_world') as processing_dir:
      logger.warning(bbox)

      # Describe the voxel grid (LVCS bounds, voxel size) in scene.xml
      create_scene_xml(openclDevice, 0, float(bbox['voxel_size']), 
          lvcs1=(float(bbox['x_min']), float(bbox['y_min']), 
                 float(bbox['z_min'])), 
          lvcs2=(float(bbox['x_max']), float(bbox['y_max']), 
                 float(bbox['z_max'])),
          origin=scene.origin, model_dir='.', number_bins=1,
          output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
          n_bytes_gpu=opencl_memory)

      counter = 1

      imageNames = []
      cameraNames = []

      os.mkdir(os.path.join(processing_dir, 'local'))
      
      #Prepping
      # Stage each image: dump its K, R, T matrices to a frame_NNNNN.krt
      # text file and hard-link/copy the image into local/.
      self.update_state(state='INITIALIZE', meta={'image_set_name': imageSet.name,
                                                  'stage':'camera fetch'})
      for image in imageList:
        (K,R,T,o) = get_krt(image, camera_set_id)
        
        krtName = os.path.join(processing_dir, 'local', 'frame_%05d.krt' % counter)
        
        with open(krtName, 'w') as fid:
          # Python 2 print-chevron: 3x3 K and R, then T, 18 decimal places
          print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
              K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2])
          print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
              R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2])
    
          print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0])
        
        imageName = image.filename_path
        extension = os.path.splitext(imageName)[1]
        localName = os.path.join(processing_dir, 'local', 
                                 'frame_%05d%s' % (counter, extension))
        lncp(imageName, localName)
        
        counter += 1
      
        imageNames.append(localName)
        cameraNames.append(krtName)
        
      variance = 0.06  # NOTE(review): unused in this BP variant
      
      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, "scene.xml"),
                                  openclDevice)
      # loaded_imgs = []
      # loaded_cams = []
    
      # for i in range(0, len(imageNames), skip_frames):
      #   logger.debug("i: %d img name: %s cam name: %s", i, imageNames[i], 
      #                cameraNames[i])
      #   self.update_state(state='PRELOADING', meta={'image_set_name': imageSet.name,
      #                                               'stage':'image load', 
      #                                               'i':i, 
      #                                               'total':len(imageNames)})
      #   img, ni, nj = load_image(imageNames[i])
      #   loaded_imgs.append(img)
      #   pcam = load_perspective_camera(cameraNames[i])
      #   loaded_cams.append(pcam)
    
      refine_cnt = 2

      # BP update sweeps interleaved with octree refinement (refinement is
      # skipped after the final sweep)
      for rfk in range(0, refine_cnt, 1):

        self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
            'stage':'update 1'})
        update_bp(vxl_scene, imageNames, cameraNames)
      # self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
      #     'stage':'update 2'})
      # update_bp(vxl_scene, imageNames, cameraNames, True, True)
      # self.update_state(state='PROCESSING', meta={'image_set_name': imageSet.name,
      #     'stage':'update 3'})
      # update_bp(vxl_scene, imageNames, cameraNames, True, True)

        if rfk < refine_cnt-1:
          self.update_state(state='PROCESSING', 
                            meta={'image_set_name': imageSet.name,
                                  'stage':'refine', 'i':rfk+1, 
                                  'total':refine_cnt})
          refine(vxl_scene)

      #Update color appearance

      # Clone scene.xml with a color appearance model (gauss_rgb).
      # NOTE(review): the source scene.xml handle opened inline is never
      # explicitly closed (relies on GC).
      with open(os.path.join(processing_dir, "scene_color.xml"), 'w') as fid:
        lines = open(os.path.join(processing_dir, "scene.xml"), 
                                  'r').readlines()
        lines = [line.replace('boxm2_mog3_grey', 
                              'boxm2_gauss_rgb').replace(
                              'boxm2_num_obs',
                              'boxm2_num_obs_single') for line in lines]
        fid.writelines(lines)

      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, 
                                                   "scene_color.xml"),
                                      openclDevice)

      # Color re-paint pass over every staged image/camera pair
      for idx, (image_name, camera_name) in enumerate(zip(imageNames, cameraNames)):
        self.update_state(state='PROCESSING', meta={
            'image_set_name': imageSet.name,
            'stage':'color_update', 
            'i':idx+1, 'total':len(imageNames),
            'images':len(imageNames)})
        img, _, _ = load_image(image_name)
        pcam = vpgl.load_perspective_camera(camera_name)
        logger.debug("color_paint idx: %d", idx)
        vxl_scene.update(pcam,img,False,False,None,openclDevice,
                         tnear = 1000.0, tfar = 100000.0)

      vxl_scene.write_cache()

      # Persist the finished world (sans staged inputs) and register it
      with voxel_globe.tools.storage_dir('voxel_world') as voxel_world_dir:
        copytree(processing_dir, voxel_world_dir, ignore=lambda x,y:['local'])
        models.VoxelWorld(
            name='%s world (%s)' % (imageSet.name, self.request.id),
            origin=scene.origin,
            directory=voxel_world_dir,
            service_id=self.request.id).save()

    return {"image_set_name" : imageSet.name}
示例#13
0
def run_build_voxel_model(self, image_collection_id, scene_id, bbox, 
                          skip_frames, cleanup=True, history=None):
  """Celery task: build a boxm2 voxel world from an ImageCollection.

  Older variant built around ``models.ImageCollection`` with ``history``
  support: downloads images via ``wget``, stages per-frame KRT camera
  files, creates a scene from either geolocated (lla) or local (lvcs)
  bounds, runs ``refine_cnt`` update/refine rounds, and stores the result
  as a ``models.VoxelWorld``.

  :param image_collection_id: pk of the ``models.ImageCollection``
  :param scene_id: pk of the ``models.Scene`` supplying the world origin
  :param bbox: dict with ``voxel_size``, ``geolocated`` flag, and
               ``x/y/z_min``/``x/y/z_max`` keys
  :param skip_frames: stride used when preloading images for updates
  :param cleanup: unused in this body (kept for interface compatibility)
  :param history: optional history marker forwarded to ``.history()`` and
                  ``get_krt``
  """
  from distutils.dir_util import remove_tree
  from shutil import move
  import random

  from vsi.tools.redirect import Redirect, Logger as LoggerWrapper
  from voxel_globe.meta import models
  from voxel_globe.tools.camera import get_krt
  import voxel_globe.tools

  from boxm2_scene_adaptor import boxm2_scene_adaptor

  from vil_adaptor import load_image
  from vpgl_adaptor import load_perspective_camera
  from voxel_globe.tools.wget import download as wget

  from vsi.vxl.create_scene_xml import create_scene_xml

  from vsi.tools.dir_util import copytree, mkdtemp

  # Route this task's stdout/stderr through the module logger
  with Redirect(stdout_c=LoggerWrapper(logger, lvl=logging.INFO),
                stderr_c=LoggerWrapper(logger, lvl=logging.WARNING)):
    
    openclDevice = os.environ['VIP_OPENCL_DEVICE']
    # NOTE(review): unlike the newer variants, the env string is never
    # converted with int() before being passed as n_bytes_gpu — confirm
    opencl_memory = os.environ.get('VIP_OPENCL_MEMORY', None)
    
    scene = models.Scene.objects.get(id=scene_id)
    
    imageCollection = models.ImageCollection.objects.get(\
        id=image_collection_id).history(history);
    imageList = imageCollection.images.all();

    with voxel_globe.tools.task_dir('voxel_world') as processing_dir:

      logger.warning(bbox)

      # Geolocated boxes are given in lon/lat/alt (lla); otherwise the
      # bounds are local vertical coordinate system (lvcs) values.
      if bbox['geolocated']:
        create_scene_xml(openclDevice, 3, float(bbox['voxel_size']), 
            lla1=(float(bbox['x_min']), float(bbox['y_min']), 
                  float(bbox['z_min'])), 
            lla2=(float(bbox['x_max']), float(bbox['y_max']), 
                  float(bbox['z_max'])),
            origin=scene.origin, model_dir='.', number_bins=1,
            output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
            n_bytes_gpu=opencl_memory)
      else:
        create_scene_xml(openclDevice, 3, float(bbox['voxel_size']), 
            lvcs1=(float(bbox['x_min']), float(bbox['y_min']), 
                   float(bbox['z_min'])), 
            lvcs2=(float(bbox['x_max']), float(bbox['y_max']), 
                   float(bbox['z_max'])),
            origin=scene.origin, model_dir='.', number_bins=1,
            output_file=open(os.path.join(processing_dir, 'scene.xml'), 'w'),
            n_bytes_gpu=opencl_memory)

      counter = 1;
      
      imageNames = []
      cameraNames = []

      os.mkdir(os.path.join(processing_dir, 'local'))
      
      #Prepping
      # Stage each image: dump its K, R, T matrices to a frame_NNNNN.krt
      # text file and download the image into local/.
      for image in imageList:
        self.update_state(state='INITIALIZE', meta={'stage':'image fetch', 
                                                    'i':counter, 
                                                    'total':len(imageList)})
        image = image.history(history)
        (K,R,T,o) = get_krt(image.history(history), history=history)
        
        krtName = os.path.join(processing_dir, 'local', 'frame_%05d.krt' % counter)
        
        with open(krtName, 'w') as fid:
          # Python 2 print-chevron: 3x3 K and R, then T, 18 decimal places
          print >>fid, (("%0.18f "*3+"\n")*3) % (K[0,0], K[0,1], K[0,2], 
              K[1,0], K[1,1], K[1,2], K[2,0], K[2,1], K[2,2]);
          print >>fid, (("%0.18f "*3+"\n")*3) % (R[0,0], R[0,1], R[0,2], 
              R[1,0], R[1,1], R[1,2], R[2,0], R[2,1], R[2,2]);
    
          print >>fid, ("%0.18f "*3+"\n") % (T[0,0], T[1,0], T[2,0]);
        
        imageName = image.originalImageUrl;
        extension = os.path.splitext(imageName)[1]
        localName = os.path.join(processing_dir, 'local', 
                                 'frame_%05d%s' % (counter, extension));
        wget(imageName, localName, secret=True)
        
        counter += 1;
      
        imageNames.append(localName)
        cameraNames.append(krtName)
        
      variance = 0.06
      
      vxl_scene = boxm2_scene_adaptor(os.path.join(processing_dir, "scene.xml"),
                                  openclDevice);
    
      current_level = 0;  # NOTE(review): unused
    
      # Preload every skip_frames-th image/camera pair into memory
      loaded_imgs = [];
      loaded_cams = [];
    
      for i in range(0, len(imageNames), skip_frames):
        logger.debug("i: %d img name: %s cam name: %s", i, imageNames[i], 
                     cameraNames[i])
        self.update_state(state='PRELOADING', meta={'stage':'image load', 
                                                    'i':i, 
                                                    'total':len(imageNames)})
        img, ni, nj = load_image(imageNames[i])
        loaded_imgs.append(img)
        pcam = load_perspective_camera(cameraNames[i])
        loaded_cams.append(pcam)
    
      # Alternate full update sweeps (random image order) with octree
      # refinement; refinement is skipped after the final sweep.
      refine_cnt = 5;
      for rfk in range(0, refine_cnt, 1):
        pair = zip(loaded_imgs, loaded_cams)
        random.shuffle(pair)
        for idx, (img, cam) in enumerate(pair):
          self.update_state(state='PROCESSING', meta={'stage':'update', 
              'i':rfk+1, 'total':refine_cnt, 'image':idx+1, 
              'images':len(loaded_imgs)})
          logger.debug("refine_cnt: %d, idx: %d", rfk, idx)
          # openclDevice[0:3]: device prefix, e.g. first three characters
          vxl_scene.update(cam,img,True,True,None,openclDevice[0:3],variance,
                       tnear = 1000.0, tfar = 100000.0);
    
        logger.debug("writing cache: %d", rfk)
        vxl_scene.write_cache();
        logger.debug("wrote cache: %d", rfk)
        
        if rfk < refine_cnt-1:
          self.update_state(state='PROCESSING', meta={'stage':'refine', 
                                                      'i':rfk, 
                                                      'total':refine_cnt})
          logger.debug("refining %d...", rfk)
          # 'cpu' devices must refine via the 'cpp' backend
          refine_device = openclDevice[0:3]
          if refine_device == 'cpu':
            refine_device = 'cpp'
          vxl_scene.refine(0.3, refine_device);
          vxl_scene.write_cache();

      
      # Persist the finished world and register it.
      # NOTE(review): the ignore list excludes 'images', but the staging
      # directory created above is named 'local' — the staged copies may be
      # included in storage; confirm intended.
      voxel_world_dir = mkdtemp(dir=os.environ['VIP_STORAGE_DIR'])
      copytree(processing_dir, voxel_world_dir, ignore=lambda x,y:['images'])
      models.VoxelWorld.create(
          name='%s world (%s)' % (imageCollection.name, self.request.id),
          origin=scene.origin,
          directory=voxel_world_dir,
          service_id=self.request.id).save();
示例#14
0
def generate_error_point_cloud(self,
                               voxel_world_id,
                               camera_set_id,
                               prob=0.5,
                               position_error_override=None,
                               orientation_error_override=None,
                               number_images=None):
    from glob import glob
    import json
    import random

    import numpy as np

    from boxm2_adaptor import load_cpp, render_depth, cast_3d_point, \
                              cast_3d_point_pass2, write_cache, \
                              accumulate_3d_point_and_cov, \
                              normalize_3d_point_and_cov
    from boxm2_scene_adaptor import boxm2_scene_adaptor
    from vpgl_adaptor_boxm2_batch import create_perspective_camera_krt, \
                             persp2gen, compute_direction_covariance
    from boxm2_mesh_adaptor import gen_error_point_cloud

    from vsi.tools.redirect import StdRedirect, Logger as LoggerWrapper

    import voxel_globe.tools
    import voxel_globe.meta.models as models
    from voxel_globe.tools.camera import get_krt

    with StdRedirect(
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_out.log', 'w'),
            open(
                os.path.join(voxel_globe.tools.log_dir(), self.request.id) +
                '_err.log', 'w')):

        self.update_state(state='SETUP', meta={'pid': os.getpid()})

        voxel_world = models.VoxelWorld.objects.get(id=voxel_world_id)
        service_inputs = json.loads(voxel_world.service.inputs)
        image_set = models.ImageSet.objects.get(id=service_inputs[0][0])
        images = image_set.images.all()

        voxel_world_dir = voxel_world.directory

        scene_filename = os.path.join(voxel_world_dir, 'scene_color.xml')

        opencl_device = os.environ['VIP_OPENCL_DEVICE']
        scene_gpu = boxm2_scene_adaptor(scene_filename, opencl_device)
        scene_cpp = boxm2_scene_adaptor(scene_filename, 'cpp')

        type_id_fname = "type_names_list.txt"
        image_id_fname = "image_list.txt"

        std_dev_angle_default = 0
        cov_c_path = 'cov_c.txt'
        cov_c_default = 0

        if not number_images:
            number_images = len(images)
        number_images = min(len(images), number_images)

        with voxel_globe.tools.task_dir('generate_error_point_cloud', cd=True) \
             as processing_dir:
            for index, image in enumerate(random.sample(images,
                                                        number_images)):
                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'casting',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                k, r, t, o = get_krt(image, camera_set_id)

                attributes = image.camera_set.get(
                    cameraset=camera_set_id).attributes

                cov_c = attributes.get('position_error', cov_c_default)
                if position_error_override is not None:
                    cov_c = position_error_override
                std_dev_angle = attributes.get('orientation_error',
                                               std_dev_angle_default)
                if orientation_error_override is not None:
                    std_dev_angle = orientation_error_override
                cov_c = np.eye(3) * cov_c**2

                np.savetxt(cov_c_path, cov_c)

                perspective_camera = create_perspective_camera_krt(k, r, t)

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre render',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                (depth_image, variance_image,
                 _) = render_depth(scene_gpu.scene, scene_gpu.opencl_cache,
                                   perspective_camera, image.image_width,
                                   image.image_height, scene_gpu.device)

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'post_render',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                cov_v_path = 'cov_%06d.txt' % index
                appearance_model = 'image'

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre_persp2gen',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                generic_camera = persp2gen(perspective_camera,
                                           image.image_width,
                                           image.image_height)

                # NOTE(review): self.update_state(state=..., meta=...) is the
                # Celery Task progress-reporting API — presumably this code runs
                # inside a bound Celery task; confirm against the enclosing def.
                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre_covar',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                # Per-image covariance of the viewing direction for this
                # camera; result is written to cov_v_path (opaque helper —
                # semantics not visible here).
                compute_direction_covariance(perspective_camera, std_dev_angle,
                                             cov_v_path)

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre_cast1',
                                      'image': index + 1,
                                      'total': len(images)
                                  })
                # First ray-casting pass for this image into the voxel scene.
                cast_3d_point(scene_cpp.scene, scene_cpp.cpu_cache,
                              perspective_camera, generic_camera, depth_image,
                              variance_image, appearance_model)
                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre_cast2',
                                      'image': index + 1,
                                      'total': len(images)
                                  })
                # Second pass consumes the covariance files produced above.
                cast_3d_point_pass2(scene_cpp.scene, scene_cpp.cpu_cache,
                                    generic_camera, appearance_model,
                                    cov_c_path, cov_v_path)

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'pre_accumulate',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

                # Fold this image's point/covariance contribution into the
                # running per-voxel accumulators.
                accumulate_3d_point_and_cov(scene_cpp.scene,
                                            scene_cpp.cpu_cache,
                                            appearance_model)

                #self.update_state(state='PROCESSING',
                #                  meta={'stage':'pre_write', 'image':index+1,
                #                        'total':len(images)})

                #write_cache(scene_cpp.cpu_cache, 1)

                self.update_state(state='PROCESSING',
                                  meta={
                                      'stage': 'post_write',
                                      'image': index + 1,
                                      'total': len(images)
                                  })

            # --- end of per-image loop ---

            self.update_state(state='PROCESSING',
                              meta={'stage': 'compute error'})

            # Normalize the accumulated per-voxel statistics over all images.
            normalize_3d_point_and_cov(scene_cpp.scene, scene_cpp.cpu_cache)

            # NOTE(review): this progress report reuses the loop variable
            # `index` AFTER the loop has completed, so 'image' is always the
            # last index + 1 — likely leftover from the commented-out in-loop
            # block above; verify whether the meta should omit image/total here.
            self.update_state(state='PROCESSING',
                              meta={
                                  'stage': 'pre_write',
                                  'image': index + 1,
                                  'total': len(images)
                              })
            # Flush the (CPU) cache back to disk; the meaning of the literal
            # `1` argument is not visible from here — TODO confirm.
            write_cache(scene_cpp.cpu_cache, 1)

            self.update_state(state='EXPORTING', meta={'stage': 'ply'})

            # Export the error point cloud as a PLY file into a persistent
            # storage directory (presumably storage_dir outlives the `with`
            # block, since ply_filename is used after it — confirm).
            with voxel_globe.tools.storage_dir('generate_error_point_cloud') \
                 as storage_dir:
                ply_filename = os.path.join(storage_dir, 'model.ply')
                gen_error_point_cloud(scene_cpp.scene, scene_cpp.cpu_cache,
                                      ply_filename, prob)

                # NOTE(review): potree_filename is assigned but never used in
                # the visible code — convert_ply_to_potree below takes
                # potree_dir instead. Candidate for removal; confirm no use
                # later in the function.
                potree_filename = os.path.join(storage_dir, 'potree.ply')

            # Convert the PLY into the Potree web-viewer format.
            with voxel_globe.tools.image_dir('point_cloud') as potree_dir:
                convert_ply_to_potree(ply_filename, potree_dir)

            # Persist a PointCloud database record pointing at the exported
            # artifacts, tagged with this task's id and the world origin.
            point_cloud = models.PointCloud(name='%s point cloud' %
                                            image_set.name,
                                            service_id=self.request.id,
                                            origin=voxel_world.origin)
            point_cloud.filename_path = ply_filename
            point_cloud.potree_dir = potree_dir
            point_cloud.save()

            # Remove the intermediate per-voxel binary files created during
            # accumulation; they are no longer needed once the PLY exists.
            voxel_files = lambda x: glob(os.path.join(voxel_world_dir, x))
            cleanup_files = []
            cleanup_files += voxel_files('boxm2_covariance_*.bin')
            cleanup_files += voxel_files('boxm2_point_*.bin')
            cleanup_files += voxel_files('float16_image_*.bin')
            for cleanup_file in cleanup_files:
                os.remove(cleanup_file)