Example #1
def estimate_overall_ref_model(colmap_folder,min_track_len,min_tri_angle,max_tri_angle,image_path):

	print 'Loading COLMAP data'
	scene_manager = SceneManager(colmap_folder)
	scene_manager.load_cameras()
	scene_manager.load_images()

	images=['frame0859.jpg', 'frame0867.jpg', 'frame0875.jpg', 'frame0883.jpg', 'frame0891.jpg']

	_L = np.array([])
	_r = np.array([])
	_ndotl = np.array([])
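	# accumulate per-pixel luminance (_L), camera-space radius (_r), and n-dot-l
	# (_ndotl) samples over every frame listed above, so that one shared
	# reflectance model can be fit to the pooled data after the loop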

	for image_name in images:

		  image_id = scene_manager.get_image_id_from_name(image_name)
		  image = scene_manager.images[image_id]
		  camera = scene_manager.get_camera(image.camera_id)

		  # image pose
		  R = util.quaternion_to_rotation_matrix(image.qvec)

		  print 'Loading 3D points'
		  scene_manager.load_points3D()
		  scene_manager.filter_points3D(min_track_len,
		    min_tri_angle=min_tri_angle, max_tri_angle=max_tri_angle,
		    image_list=set([image_id]))

		  points3D, points2D = scene_manager.get_points3D(image_id)
		  points3D = points3D.dot(R.T) + image.tvec[np.newaxis,:]
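		  # the line above brings the sparse points into this image's camera frame
		  # (COLMAP's qvec/tvec encode the world-to-camera transform)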

		  # need to remove redundant points
		  # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
		  points2D_view = np.ascontiguousarray(points2D).view(
		    np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
		  _, idx = np.unique(points2D_view, return_index=True)

		  points2D, points3D = points2D[idx], points3D[idx]

		  # further rule out any points too close to the image border (for bilinear
		  # interpolation)
		  mask = (
		      (points2D[:,0] < camera.width - 1) & (points2D[:,1] < camera.height - 1))
		  points2D, points3D = points2D[mask], points3D[mask]

		  points2D_image = points2D.copy() # coordinates in image space
		  points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
		  points2D = points2D.dot(np.linalg.inv(camera.get_camera_matrix()).T)[:,:2]

		  print len(points3D), 'total points'

		  # load image
		  #image_file = scene_manager.image_path + image.name
		  image_file = image_path + image.name
		  im_rgb = skimage.img_as_float(skimage.io.imread(image_file)) # color image
		  L = skimage.color.rgb2lab(im_rgb)[:,:,0] * 0.01
		  L = np.maximum(L, 1e-6) # unfortunately, can't have black pixels, since we
		                          # divide by L
		  # initial values on unit sphere
		  x, y = camera.get_image_grid()

		  print 'Computing nearest neighbors'
		  kdtree = KDTree(points2D)
		  weights, nn_idxs = kdtree.query(np.c_[x.ravel(),y.ravel()], KD_TREE_KNN)
		  weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
		  nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

		  # turn distances into weights for the nearest neighbors
		  np.exp(-weights, weights) # in-place

		  # create initial surface on unit sphere
		  S0 = np.dstack((x, y, np.ones_like(x)))
		  S0 /= np.linalg.norm(S0, axis=-1)[:,:,np.newaxis]

		  r_fixed = np.linalg.norm(points3D, axis=-1) # fixed 3D depths

		  S = S0.copy()
		  z = S0[:,:,2]
		  S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
		      points2D_image, r_fixed, util.generate_surface(camera, z))
		  z_est = np.maximum(S[:,:,2], 1e-6)
		  S = util.generate_surface(camera, z_est)
		  r = np.linalg.norm(S, axis=-1)
		  ndotl = util.calculate_ndotl(camera, S)
		  if _L.size == 0:
		      _L = L.ravel()
		      _r = r.ravel()
		      _ndotl = ndotl.ravel()
		  else:
		      _L = np.append(_L, L.ravel())
		      _r = np.append(_r, r.ravel())
		      _ndotl = np.append(_ndotl, ndotl.ravel())


	#
	falloff, model_params, residual = fit_reflectance_model(sfs.POWER_MODEL,
	    _L, _r, _ndotl, False, 5)
	import pdb; pdb.set_trace()
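# hedged usage sketch -- the argument values below are illustrative placeholders,
# not taken from the original project:
#
#   estimate_overall_ref_model('colmap_output/', min_track_len=3,
#                              min_tri_angle=1.5, max_tri_angle=20.0,
#                              image_path='images/')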
Example #2
    def run_iterative_sfs(out_file, model_type, fit_falloff, *extra_params):
        #if os.path.exists(out_file + '_z.bin'): # don't re-run existing results
        #  return

        if model_type == sfs.LAMBERTIAN_MODEL:
            model_name = 'LAMBERTIAN_MODEL'
        elif model_type == sfs.OREN_NAYAR_MODEL:
            model_name = 'OREN_NAYAR_MODEL'
        elif model_type == sfs.PHONG_MODEL:
            model_name = 'PHONG_MODEL'
        elif model_type == sfs.COOK_TORRANCE_MODEL:
            model_name = 'COOK_TORRANCE_MODEL'
        elif model_type == sfs.POWER_MODEL:
            model_name = 'POWER_MODEL'
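        # note: model_name stays undefined if model_type matches none of the
        # cases above, which would raise a NameError at the print below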

        if model_type == sfs.POWER_MODEL:
            print '%s_%i' % (model_name, extra_params[0])
        else:
            print model_name

        S = S0.copy()
        z = S0[:, :, 2]

        for iteration in xrange(MAX_NUM_ITER):
            print 'Iteration', iteration

            z_old = z

            if get_initial_warp:

                #z_gt=util.load_point_ply('C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code\\Rui\\SFS_CPU\\frame0859.jpg_gt.ply')
                # warp to 3D points
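                # iteration starts at 0, so this condition is always true and
                # the else branch (z_est = z) below is effectively dead code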
                if iteration > -1:
                    S, r_ratios = nearest_neighbor_warp(
                        weights, nn_idxs, points2D_image, r_fixed,
                        util.generate_surface(camera, z))
                    z_est = np.maximum(S[:, :, 2], 1e-6)
                    S = util.generate_surface(camera, z_est)

                else:
                    z_est = z

                S = util.generate_surface(camera, z_est)
                #util.save_sfs_ply('warp' + '.ply', S, im_rgb)
                #util.save_xyz('test.xyz',points3D);
                #z=z_est
                #break
                #z_est=z

            else:
                #import pdb;pdb.set_trace()
                #z_est = extract_depth_map(camera,ref_surf_name,R,image)
                z_est = np.fromfile(
                    r'C:\Users\user\Documents\UNC\Research\ColonProject\code\SFS_Program_from_True\endo_evaluation\gt_surfaces\frame0859.jpg.bin',
                    dtype=np.float32).reshape(camera.height,
                                              camera.width) / WORLD_SCALE
                z_est = z_est.astype(float)
                #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
                #    points2D_image, r_fixed, util.generate_surface(camera, z_est))
                z_est = np.maximum(z_est[:, :], 1e-6)
                #Sworld = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
                S = util.generate_surface(camera, z_est)
                #util.save_sfs_ply('test' + '.ply', S, im_rgb)
                #util.save_sfs_ply(out_file + '_warp_%i.ply' % iteration, Sworld, im_rgb)
                #import pdb;pdb.set_trace()
                # if we need to, make corrections for non-positive depths

                #S = util.generate_surface(camera, z_est)

            mask = (z_est < INIT_Z)
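            # the combined _mask below keeps moderately lit pixels (0.1 < L < 0.8)
            # that are closer than INIT_Z, excluding likely specular highlights and
            # very dark pixels from the reflectance fit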

            specular_mask = (L < 0.8)
            dark_mask = (L > 0.1)
            _mask = np.logical_and(specular_mask, mask)
            _mask = np.logical_and(_mask, dark_mask)
            # fit reflectance model
            r = np.linalg.norm(S, axis=-1)
            ndotl = util.calculate_ndotl(camera, S)
            falloff, model_params, residual = fit_reflectance_model(
                model_type, L[_mask], r.ravel(), r[_mask], ndotl.ravel(),
                ndotl[_mask], fit_falloff, camera.width, camera.height,
                *extra_params)
            #r = np.linalg.norm(S[specular_mask], axis=-1)

            #import pdb;pdb.set_trace()
            #model_params=np.array([26.15969874,-27.674055,-12.52426,7.579855,21.9768004,24.3911142,-21.7282996,-19.850894,-11.62229,-4.837014])
            #model_params=np.array([-19.4837,-490.4796,812.4527,-426.09107,139.2602,351.8061,-388.1591,875.5013,-302.4748,-414.4384])
            #falloff = 1.2
            #ndotl = util.calculate_ndotl(camera, S)[specular_mask]
            #falloff, model_params, residual = fit_reflectance_model(model_type,
            #    L[specular_mask], r, ndotl, fit_falloff, *extra_params)
            #r = np.linalg.norm(S[neighborhood_mask], axis=-1)
            #ndotl = util.calculate_ndotl(camera, S)[neighborhood_mask]
            #falloff, model_params, residual = fit_reflectance_model(model_type,
            #    L[neighborhood_mask], r, ndotl, fit_falloff, *extra_params)

            # lambda values reflect our confidence in the current surface: 0
            # corresponds to only using SFS at a pixel, 1 corresponds to equally
            # weighting SFS and the current estimate, and larger values increasingly
            # favor using only the current estimate

            rdiff = np.abs(r_fixed - get_estimated_r(S, points2D_image))
            w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
            lambdas = (np.sum(weights * w[nn_idxs], axis=-1) /
                       np.sum(weights, axis=-1))
            lambdas = np.maximum(lambdas, 0.)  # just in case
            #
            lambdas[~mask] = 0

            #if iteration == 0: # don't use current estimated surface on first pass
            #lambdas = np.zeros_like(z)
            #else:
            #  r_ratios_postwarp = r_fixed / get_estimated_r(S, points2D_image)
            #  ratio_diff = np.abs(r_ratios_prewarp - r_ratios_postwarp)
            #  ratio_diff[ratio_diff == 0] = 1e-10 # arbitrarily high lambda
            #  feature_lambdas = 1. / ratio_diff
            #  lambdas = (np.sum(weights * feature_lambdas[nn_idxs], axis=-1) /
            #    np.sum(weights, axis=-1))

            # run SFS

            H_lut, dH_lut = compute_H_LUT(model_type, model_params,
                                          NUM_H_LUT_BINS)
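            # H_lut/dH_lut appear to be lookup tables for the reflectance model's
            # shading term and its derivative, sampled into NUM_H_LUT_BINS bins so
            # the SFS solver does not have to re-evaluate the model per pixel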
            #import pdb;pdb.set_trace()
            # run SFS
            #H_lut = np.ascontiguousarray(H_lut.astype(np.float32))
            #dH_lut = np.ascontiguousarray(dH_lut.astype(np.float32))

            z = run_sfs(H_lut, dH_lut, camera, L, lambdas, z_est, model_type,
                        model_params, falloff, vmask,
                        use_image_weighted_derivatives)

            # check for convergence
            #diff = np.sum(np.abs(z_old[specular_mask] - z[specular_mask]))
            #if diff < CONVERGENCE_THRESHOLD * camera.height * camera.width:
            #  break

            # save the surface
            #S = util.generate_surface(camera, z)
            #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
            #util.save_sfs_ply(out_file + '_%i.ply' % iteration, S, im_rgb)
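        # with the convergence check above commented out, the loop never breaks,
        # so this for/else clause always reports non-convergence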
        else:
            print 'DID NOT CONVERGE'

        #import pdb;pdb.set_trace()
        S = util.generate_surface(camera, z)
        #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)

        util.save_sfs_ply(out_file + '.ply', S, im_rgb)

        z.astype(np.float32).tofile(out_file + '_z.bin')

        # save the surface
        #S = util.generate_surface(camera, z)
        #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
        #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
        #    points2D_image, r_fixed, util.generate_surface(camera, z))
        #util.save_sfs_ply(out_file + '_warped.ply', S, im_rgb)
        #z = np.maximum(S[:,:,2], 1e-6)
        #z.astype(np.float32).tofile(out_file + '_warped_z.bin')

        #reflectance_models.save_reflectance_model(out_file + '_reflectance.txt',
        #    model_name, residual, model_params, falloff, *extra_params)

        print
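    # hedged usage sketch -- an assumption, not taken from the original project:
    # the enclosing script presumably calls this nested helper once per model, e.g.
    #
    #   run_iterative_sfs(out_file_base + '_power5', sfs.POWER_MODEL, True, 5)
    #
    # where out_file_base is a hypothetical output prefix defined by the caller.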
Example #3
def estimate_overall_ref_model(colmap_folder, min_track_len, min_tri_angle,
                               max_tri_angle, image_path):

    print 'Loading COLMAP data'
    scene_manager = SceneManager(colmap_folder)
    scene_manager.load_cameras()
    scene_manager.load_images()

    images = [
        'frame0859.jpg', 'frame0867.jpg', 'frame0875.jpg', 'frame0883.jpg',
        'frame0891.jpg'
    ]

    _L = np.array([])
    _r = np.array([])
    _ndotl = np.array([])

    for image_name in images:

        image_id = scene_manager.get_image_id_from_name(image_name)
        image = scene_manager.images[image_id]
        camera = scene_manager.get_camera(image.camera_id)

        # image pose
        R = util.quaternion_to_rotation_matrix(image.qvec)

        print 'Loading 3D points'
        scene_manager.load_points3D()
        scene_manager.filter_points3D(min_track_len,
                                      min_tri_angle=min_tri_angle,
                                      max_tri_angle=max_tri_angle,
                                      image_list=set([image_id]))

        points3D, points2D = scene_manager.get_points3D(image_id)
        points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]

        # need to remove redundant points
        # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
        points2D_view = np.ascontiguousarray(points2D).view(
            np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
        _, idx = np.unique(points2D_view, return_index=True)

        points2D, points3D = points2D[idx], points3D[idx]

        # further rule out any points too close to the image border (for bilinear
        # interpolation)
        mask = ((points2D[:, 0] < camera.width - 1) &
                (points2D[:, 1] < camera.height - 1))
        points2D, points3D = points2D[mask], points3D[mask]

        points2D_image = points2D.copy()  # coordinates in image space
        points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
        points2D = points2D.dot(np.linalg.inv(
            camera.get_camera_matrix()).T)[:, :2]
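        # the transform above maps pixel coordinates to normalized camera
        # coordinates via the inverse intrinsic matrix, then drops the
        # homogeneous coordinate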

        print len(points3D), 'total points'

        # load image
        #image_file = scene_manager.image_path + image.name
        image_file = image_path + image.name
        im_rgb = skimage.img_as_float(
            skimage.io.imread(image_file))  # color image
        L = skimage.color.rgb2lab(im_rgb)[:, :, 0] * 0.01
        L = np.maximum(
            L, 1e-6)  # unfortunately, can't have black pixels, since we
        # divide by L
        # initial values on unit sphere
        x, y = camera.get_image_grid()

        print 'Computing nearest neighbors'
        kdtree = KDTree(points2D)
        weights, nn_idxs = kdtree.query(np.c_[x.ravel(), y.ravel()],
                                        KD_TREE_KNN)
        weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
        nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

        # turn distances into weights for the nearest neighbors
        np.exp(-weights, weights)  # in-place

        # create initial surface on unit sphere
        S0 = np.dstack((x, y, np.ones_like(x)))
        S0 /= np.linalg.norm(S0, axis=-1)[:, :, np.newaxis]

        r_fixed = np.linalg.norm(points3D, axis=-1)  # fixed 3D depths
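        # these sparse COLMAP depths stay fixed and anchor the nearest-neighbor
        # warp of the estimated surface below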

        S = S0.copy()
        z = S0[:, :, 2]
        S, r_ratios = nearest_neighbor_warp(weights, nn_idxs, points2D_image,
                                            r_fixed,
                                            util.generate_surface(camera, z))
        z_est = np.maximum(S[:, :, 2], 1e-6)
        S = util.generate_surface(camera, z_est)
        r = np.linalg.norm(S, axis=-1)
        ndotl = util.calculate_ndotl(camera, S)
        if _L.size == 0:
            _L = L.ravel()
            _r = r.ravel()
            _ndotl = ndotl.ravel()
        else:
            _L = np.append(_L, L.ravel())
            _r = np.append(_r, r.ravel())
            _ndotl = np.append(_ndotl, ndotl.ravel())

    #
    falloff, model_params, residual = fit_reflectance_model(
        sfs.POWER_MODEL, _L, _r, _ndotl, False, 5)
    import pdb
    pdb.set_trace()
Example #4
  def run_iterative_sfs(out_file, model_type, fit_falloff, *extra_params):
    #if os.path.exists(out_file + '_z.bin'): # don't re-run existing results
    #  return

    if model_type == sfs.LAMBERTIAN_MODEL:
      model_name = 'LAMBERTIAN_MODEL'
    elif model_type == sfs.OREN_NAYAR_MODEL:
      model_name = 'OREN_NAYAR_MODEL'
    elif model_type == sfs.PHONG_MODEL:
      model_name = 'PHONG_MODEL'
    elif model_type == sfs.COOK_TORRANCE_MODEL:
      model_name = 'COOK_TORRANCE_MODEL'
    elif model_type == sfs.POWER_MODEL:
      model_name = 'POWER_MODEL'

    if model_type == sfs.POWER_MODEL:
      print '%s_%i' % (model_name, extra_params[0])
    else:
      print model_name

    S = S0.copy()
    z = S0[:,:,2]

    for iteration in xrange(MAX_NUM_ITER):
      print 'Iteration', iteration

      z_old = z

      if get_initial_warp:

        #z_gt=util.load_point_ply('C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code\\Rui\\SFS_CPU\\frame0859.jpg_gt.ply')
        # warp to 3D points
        if iteration > -1:
          S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
              points2D_image, r_fixed, util.generate_surface(camera, z))
          z_est = np.maximum(S[:,:,2], 1e-6)
          S = util.generate_surface(camera, z_est)

        else:
          z_est = z

        S = util.generate_surface(camera, z_est)
        #util.save_sfs_ply('warp' + '.ply', S, im_rgb)
        #util.save_xyz('test.xyz',points3D);
        #z=z_est
        #break
        #z_est=z

      else:
        #import pdb;pdb.set_trace()
        #z_est = extract_depth_map(camera,ref_surf_name,R,image)
        z_est = np.fromfile(
            r'C:\Users\user\Documents\UNC\Research\ColonProject\code\SFS_Program_from_True\endo_evaluation\gt_surfaces\frame0859.jpg.bin',
            dtype=np.float32).reshape(camera.height, camera.width) / WORLD_SCALE
        z_est = z_est.astype(float)
        #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
        #    points2D_image, r_fixed, util.generate_surface(camera, z_est))
        z_est = np.maximum(z_est[:,:], 1e-6)
        #Sworld = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
        S = util.generate_surface(camera, z_est)
        #util.save_sfs_ply('test' + '.ply', S, im_rgb)
        #util.save_sfs_ply(out_file + '_warp_%i.ply' % iteration, Sworld, im_rgb)
        #import pdb;pdb.set_trace()
        # if we need to, make corrections for non-positive depths
      
        #S = util.generate_surface(camera, z_est)

      mask = (z_est < INIT_Z)

      specular_mask = (L < 0.8)
      dark_mask = (L > 0.1)
      _mask = np.logical_and(specular_mask, mask)
      _mask = np.logical_and(_mask, dark_mask)
      # fit reflectance model
      r = np.linalg.norm(S, axis=-1)
      ndotl = util.calculate_ndotl(camera, S)
      falloff, model_params, residual = fit_reflectance_model(model_type,
          L[_mask], r.ravel(), r[_mask], ndotl.ravel(), ndotl[_mask],
          fit_falloff, camera.width, camera.height, *extra_params)
      #r = np.linalg.norm(S[specular_mask], axis=-1)

      #import pdb;pdb.set_trace()
      #model_params=np.array([26.15969874,-27.674055,-12.52426,7.579855,21.9768004,24.3911142,-21.7282996,-19.850894,-11.62229,-4.837014])
      #model_params=np.array([-19.4837,-490.4796,812.4527,-426.09107,139.2602,351.8061,-388.1591,875.5013,-302.4748,-414.4384])
      #falloff = 1.2
      #ndotl = util.calculate_ndotl(camera, S)[specular_mask]
      #falloff, model_params, residual = fit_reflectance_model(model_type,
      #    L[specular_mask], r, ndotl, fit_falloff, *extra_params)
      #r = np.linalg.norm(S[neighborhood_mask], axis=-1)
      #ndotl = util.calculate_ndotl(camera, S)[neighborhood_mask]
      #falloff, model_params, residual = fit_reflectance_model(model_type,
      #    L[neighborhood_mask], r, ndotl, fit_falloff, *extra_params)

      # lambda values reflect our confidence in the current surface: 0
      # corresponds to only using SFS at a pixel, 1 corresponds to equally
      # weighting SFS and the current estimate, and larger values increasingly
      # favor using only the current estimate

      rdiff = np.abs(r_fixed - get_estimated_r(S, points2D_image))
      w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
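      # i.e. w = log10(r_fixed / (2 * rdiff)): a feature only gets positive weight
      # when its radius error is under half its SfM radius, and the weight grows
      # as the error shrinks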
      lambdas = (np.sum(weights * w[nn_idxs], axis=-1) /
          np.sum(weights, axis=-1))
      lambdas = np.maximum(lambdas, 0.) # just in case
      #
      lambdas[~mask] = 0

      #if iteration == 0: # don't use current estimated surface on first pass
      #lambdas = np.zeros_like(z)
      #else:
      #  r_ratios_postwarp = r_fixed / get_estimated_r(S, points2D_image)
      #  ratio_diff = np.abs(r_ratios_prewarp - r_ratios_postwarp)
      #  ratio_diff[ratio_diff == 0] = 1e-10 # arbitrarily high lambda
      #  feature_lambdas = 1. / ratio_diff
      #  lambdas = (np.sum(weights * feature_lambdas[nn_idxs], axis=-1) /
      #    np.sum(weights, axis=-1))

      # run SFS

      H_lut, dH_lut = compute_H_LUT(model_type, model_params, NUM_H_LUT_BINS)
      #import pdb;pdb.set_trace()
      # run SFS
      #H_lut = np.ascontiguousarray(H_lut.astype(np.float32))
      #dH_lut = np.ascontiguousarray(dH_lut.astype(np.float32))


      z = run_sfs(H_lut, dH_lut, camera, L, lambdas, z_est, model_type,
          model_params, falloff, vmask, use_image_weighted_derivatives)

      # check for convergence
      #diff = np.sum(np.abs(z_old[specular_mask] - z[specular_mask]))
      #if diff < CONVERGENCE_THRESHOLD * camera.height * camera.width:
      #  break
      
      # save the surface
      #S = util.generate_surface(camera, z)
      #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
      #util.save_sfs_ply(out_file + '_%i.ply' % iteration, S, im_rgb)
    else:
      print 'DID NOT CONVERGE'

    #import pdb;pdb.set_trace()
    S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
   
    util.save_sfs_ply(out_file + '.ply', S, im_rgb)
   
    z.astype(np.float32).tofile(out_file + '_z.bin')
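    # the depth map is written as a raw, headerless float32 buffer
    # (presumably camera.height * camera.width values in row-major order)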

    # save the surface
    #S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
    #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
    #    points2D_image, r_fixed, util.generate_surface(camera, z))    
    #util.save_sfs_ply(out_file + '_warped.ply', S, im_rgb)
    #z = np.maximum(S[:,:,2], 1e-6)
    #z.astype(np.float32).tofile(out_file + '_warped_z.bin')

    #reflectance_models.save_reflectance_model(out_file + '_reflectance.txt',
    #    model_name, residual, model_params, falloff, *extra_params)

    print