Ejemplo n.º 1
0
    def get_viewed_points(self, image_id):
        """Return the points of the reconstruction visible in a given image.

        Args:
            image_id: key into self.images identifying the view.

        Returns:
            (points2D, points3D) tuple: pixel coordinates of each visible
            point and the corresponding 3D points in the camera frame.
        """
        image = self.images[image_id]

        # collect the indices of all valid 3D points (-1 marks "no point")
        valid_idxs = set(self.point3D_id_to_point3D_idx.itervalues())
        valid_idxs.discard(-1)
        pts_world = self.points3D[list(valid_idxs), :]

        # rotate/translate the points into the camera coordinate frame
        R = util.quaternion_to_rotation_matrix(image.qvec)
        pts_cam = pts_world.dot(R.T) + image.tvec[np.newaxis, :]
        # keep points in front of camera (positive depth)
        in_front = pts_cam[:, 2] > 0
        pts_cam = pts_cam[in_front, :]

        # project onto the image plane and dehomogenize
        camera = self.cameras[image.camera_id]
        proj = pts_cam.dot(camera.get_camera_matrix().T)
        uv = proj[:, :2] / proj[:, 2][:, np.newaxis]

        # keep only projections that land inside the image bounds
        inside = ((uv[:, 0] >= 0) & (uv[:, 1] >= 0) &
                  (uv[:, 0] < camera.width - 1) &
                  (uv[:, 1] < camera.height - 1))

        return uv[inside, :], pts_cam[inside, :]
Ejemplo n.º 2
0
  def get_viewed_points(self, image_id):
    """Return the 3D points visible in image `image_id`.

    Returns a (points2D, points3D) pair: the pixel coordinates of each
    visible point and the corresponding 3D point in the camera frame.
    """
    image = self.images[image_id]

    # get unfiltered points (-1 is the sentinel for "no 3D point")
    point3D_idxs = set(self.point3D_id_to_point3D_idx.itervalues())
    point3D_idxs.discard(-1)
    point3D_idxs = list(point3D_idxs)
    points3D = self.points3D[point3D_idxs,:]

    # orient points relative to camera (world -> camera frame)
    R = util.quaternion_to_rotation_matrix(image.qvec)
    points3D = points3D.dot(R.T) + image.tvec[np.newaxis,:]
    points3D = points3D[points3D[:,2] > 0,:] # keep points in front of camera

    # put points into image coordinates (project and dehomogenize)
    camera = self.cameras[image.camera_id]
    points2D = points3D.dot(camera.get_camera_matrix().T)
    points2D = points2D[:,:2] / points2D[:,2][:,np.newaxis]

    # keep points that are within the image
    mask = ((points2D[:,0] >= 0) & (points2D[:,1] >= 0) &
      (points2D[:,0] < camera.width - 1) & (points2D[:,1] < camera.height - 1))

    return points2D[mask,:], points3D[mask,:]
Ejemplo n.º 3
0
def estimate_overall_ref_model(colmap_folder,min_track_len,min_tri_angle,max_tri_angle,image_path):

	print 'Loading COLMAP data'
	scene_manager = SceneManager(colmap_folder)
	scene_manager.load_cameras()
	scene_manager.load_images()

	images=['frame0859.jpg', 'frame0867.jpg', 'frame0875.jpg', 'frame0883.jpg', 'frame0891.jpg']

	_L=np.array([])
	_r=np.array([])
	_ndotl=np.array([])

	for image_name in images: 	

	  	  
		  image_id = scene_manager.get_image_id_from_name(image_name)
		  image = scene_manager.images[image_id]
		  camera = scene_manager.get_camera(image.camera_id)

		  # image pose
		  R = util.quaternion_to_rotation_matrix(image.qvec)

		  print 'Loading 3D points'
		  scene_manager.load_points3D()
		  scene_manager.filter_points3D(min_track_len,
		    min_tri_angle=min_tri_angle, max_tri_angle=max_tri_angle,
		    image_list=set([image_id]))

		  points3D, points2D = scene_manager.get_points3D(image_id)
		  points3D = points3D.dot(R.T) + image.tvec[np.newaxis,:]

		  # need to remove redundant points
		  # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
		  points2D_view = np.ascontiguousarray(points2D).view(
		    np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
		  _, idx = np.unique(points2D_view, return_index=True)

		  points2D, points3D = points2D[idx], points3D[idx]

		  # further rule out any points too close to the image border (for bilinear
		  # interpolation)
		  mask = (
		      (points2D[:,0] < camera.width - 1) & (points2D[:,1] < camera.height - 1))
		  points2D, points3D = points2D[mask], points3D[mask]

		  points2D_image = points2D.copy() # coordinates in image space
		  points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
		  points2D = points2D.dot(np.linalg.inv(camera.get_camera_matrix()).T)[:,:2]

		  print len(points3D), 'total points'

		  # load image
		  #image_file = scene_manager.image_path + image.name
		  image_file = image_path + image.name
		  im_rgb = skimage.img_as_float(skimage.io.imread(image_file)) # color image
		  L = skimage.color.rgb2lab(im_rgb)[:,:,0] * 0.01
		  L = np.maximum(L, 1e-6) # unfortunately, can't have black pixels, since we
		                          # divide by L
		  # initial values on unit sphere
		  x, y = camera.get_image_grid()

		  print 'Computing nearest neighbors'
		  kdtree = KDTree(points2D)
		  weights, nn_idxs = kdtree.query(np.c_[x.ravel(),y.ravel()], KD_TREE_KNN)
		  weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
		  nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

		  # turn distances into weights for the nearest neighbors
		  np.exp(-weights, weights) # in-place

		  # create initial surface on unit sphere
		  S0 = np.dstack((x, y, np.ones_like(x)))
		  S0 /= np.linalg.norm(S0, axis=-1)[:,:,np.newaxis]

		  r_fixed = np.linalg.norm(points3D, axis=-1) # fixed 3D depths

		  S = S0.copy()
		  z = S0[:,:,2]
		  S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
            points2D_image, r_fixed, util.generate_surface(camera, z))
		  z_est = np.maximum(S[:,:,2], 1e-6)
		  S = util.generate_surface(camera, z_est)
		  r = np.linalg.norm(S, axis=-1)
		  ndotl = util.calculate_ndotl(camera, S)
		  if _L.size == 0:
		  	_L=L.ravel()
		  	_r=r.ravel()
		  	_ndotl=ndotl.ravel()
		  else:
		  	_L=np.append(_L,L.ravel())
		  	_r=np.append(_r,r.ravel())
		  	_ndotl=np.append(_ndotl,ndotl.ravel())


	#
	falloff, model_params, residual = fit_reflectance_model(sfs.POWER_MODEL,
      _L, _r, _ndotl, False, 5)		  
	import pdb;pdb.set_trace();
Ejemplo n.º 4
0
def main(_):
    """Fine-tune a disparity network on a single image pair (TF1 graph mode).

    Builds a graph with placeholders for two 224x224 RGB frames, a ground
    truth depth map, a 4x4 relative pose, camera intrinsics, and sparse
    point data; the loss combines a multi-scale photometric reprojection
    error, a depth error against the (scale-corrected) predicted disparity,
    and a smoothness term.  Restores a checkpoint, then runs up to 100000
    optimization steps on one pair, dumping the predicted depth at step 500
    and dropping into pdb at steps 1, 200, and 1000 for inspection.
    """

    with tf.Graph().as_default():
        #Load image and label
        x1 = tf.placeholder(shape=[1, 224, 224, 3], dtype=tf.float32)
        x2 = tf.placeholder(shape=[1, 224, 224, 3], dtype=tf.float32)
        gt_x1_depth = tf.placeholder(shape=[1, 224, 224, 1], dtype=tf.float32)

        img_list = sorted(glob(FLAGS.dataset_dir + '/*.jpg'))

        pred_poses = tf.placeholder(shape=[1, 4, 4], dtype=tf.float32)
        intrinsics = tf.placeholder(shape=[1, 3, 3], dtype=tf.float32)

        # sparse supervision: per-pixel depth values and a multiply mask
        tf_points3D = tf.placeholder(shape=[224, 224], dtype=tf.float32)
        tf_multiply = tf.placeholder(shape=[224, 224], dtype=tf.float32)
        tf_points2D = tf.placeholder(shape=[None, 2], dtype=tf.float32)

        #import pdb;pdb.set_trace()
        # rescale intrinsics per pyramid scale; the 720x240 divisor is
        # presumably the original capture resolution — TODO confirm
        m_stack_intrinsics = get_multi_scale_intrinsics(
            intrinsics, FLAGS.num_scales, 224.0 / 720.0, 224.0 / 240.0)

        # # Define the model:
        with tf.name_scope("Prediction"):

            pred_disp, depth_net_endpoints = disp_net(x1, is_training=True)

            # scale factor aligning predicted depth (1/disparity) with the
            # sparse 3D point depths
            scale_factor = get_scale_factor(tf_points3D,
                                            1.0 / pred_disp[0][0, :, :, 0],
                                            tf_points2D)

        with tf.name_scope("compute_loss"):

            pixel_loss = 0

            smooth_loss = 0

            curr_proj_image = []

            # accumulate losses over the scale pyramid
            for s in range(FLAGS.num_scales):
                smooth_loss += FLAGS.smooth_weight/(2**s) * \
                    compute_smooth_loss(pred_disp[s])

                curr_src_image = tf.image.resize_area(
                    x1,
                    [int(224 / (2**s)), int(224 / (2**s))])
                curr_tgt_image = tf.image.resize_area(
                    x2,
                    [int(224 / (2**s)), int(224 / (2**s))])

                curr_gt_x1_depth = tf.image.resize_area(
                    gt_x1_depth,
                    [int(224 / (2**s)), int(224 / (2**s))])

                #import pdb;pdb.set_trace()
                # warp target into source view using predicted depth and the
                # scale-corrected pose
                curr_proj_image.append(
                    projective_inverse_warp(
                        curr_tgt_image,
                        #tf.squeeze(1.0/curr_gt_x1_depth,axis=3),
                        tf.squeeze(1.0 / pred_disp[s], axis=3),
                        tf.matmul(pred_poses, scale_factor),
                        #pred_poses,
                        m_stack_intrinsics[:, s, :, :]))

                # photometric error + depth error vs. scaled disparity
                curr_proj_error = tf.abs(curr_src_image - curr_proj_image[s])
                curr_depth_error = tf.abs(curr_gt_x1_depth -
                                          scale_factor[0, 0, 0] * pred_disp[s])

                pixel_loss += tf.reduce_mean(curr_proj_error)
                pixel_loss += tf.reduce_mean(
                    curr_depth_error) * FLAGS.data_weight / (2**s)

            total_loss = pixel_loss + smooth_loss

            saver = tf.train.Saver([var for var in tf.model_variables()])

            checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)

            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                               FLAGS.beta1)
            global_step = tf.Variable(0, name='global_step', trainable=False)
            train_op = optimizer.minimize(total_loss, global_step=global_step)

        with tf.Session() as sess:

            init = tf.global_variables_initializer()
            sess.run(init)
            saver.restore(sess, checkpoint)

            # load and resize the two input frames
            I1 = pil.open(img_list[0])
            I1 = np.array(I1)
            I1 = cv2.resize(I1, (224, 224), interpolation=cv2.INTER_AREA)

            I2 = pil.open(img_list[1])
            I2 = np.array(I2)
            I2 = cv2.resize(I2, (224, 224), interpolation=cv2.INTER_AREA)

            # hard-coded camera intrinsics matrix
            m_intric = np.array([[567.239, 0.0, 376.04],
                                 [0.0, 261.757, 100.961], [0.0, 0.0, 1.0]])

            # ground-truth depth from a hard-coded binary dump
            z = np.fromfile(
                '/home/wrlife/project/deeplearning/depth_prediction/backprojtest/oldcase1/frame1819.jpg_z.bin',
                dtype=np.float32).reshape(FLAGS.image_height,
                                          FLAGS.image_width, 1)

            #================================
            #
            #================================
            #Get camera translation matrix

            r1, t1, _, _ = util.get_camera_pose(
                FLAGS.dataset_dir + "/images.txt", 'frame1819.jpg')
            r2, t2, _, _ = util.get_camera_pose(
                FLAGS.dataset_dir + "/images.txt", 'frame1829.jpg')

            scene_manager = SceneManager(FLAGS.dataset_dir)

            scene_manager.load_cameras()
            scene_manager.load_images()
            scene_manager.load_points3D()

            image_name = 'frame1819.jpg'
            image_id = scene_manager.get_image_id_from_name(image_name)
            image = scene_manager.images[image_id]
            camera = scene_manager.get_camera(image.camera_id)
            points3D, points2D = scene_manager.get_points3D(image_id)
            R = util.quaternion_to_rotation_matrix(image.qvec)
            points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]

            #points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
            #points2D = points2D.dot(np.linalg.inv(camera.get_camera_matrix()).T)[:,:2]

            # rasterize sparse point depths (vmask) and a validity mask
            vmask = np.zeros_like(z)
            mulmask = np.zeros_like(z)
            for k, (u, v) in enumerate(points2D):
                j0, i0 = int(u), int(
                    v)  # upper left pixel for the (u,v) coordinate
                vmask[i0, j0] = points3D[k, 2]
                mulmask[i0, j0] = 1.0

            z = cv2.resize(z, (224, 224), interpolation=cv2.INTER_AREA)
            z = z[:, :, np.newaxis]
            #z = z/100
            # invert: network output is disparity-like (1/depth) — see the
            # 1.0 / pred_disp usage above
            z = 1.0 / z

            vmask = cv2.resize(vmask, (224, 224),
                               interpolation=cv2.INTER_NEAREST)
            mulmask = cv2.resize(mulmask, (224, 224),
                                 interpolation=cv2.INTER_NEAREST)

            # collect (row, col) positions of valid sparse points after resize
            resized_points2D = []
            for i in range(224):
                for j in range(224):
                    if mulmask[i, j] != 0:
                        resized_points2D.append([i, j])

            # import pdb;pdb.set_trace()
            # z_stack = []
            # points3D_stack = []
            # for i in range(len(resized_points2D)):
            # 	i0=int(resized_points2D[i][0])
            # 	j0=int(resized_points2D[i][1])
            # 	z_stack.append(z[i0,j0])
            # 	points3D_stack.append(vmask[i0,j0])

            # import pdb;pdb.set_trace()
            # m_s = np.median(points3D_stack)/np.median(z_stack)
            # scale_m = np.eye(4)
            # scale_m[0,0] = m_s
            # scale_m[1,1] = m_s
            # scale_m[2,2] = m_s

            # #z = m_s*z

            # build 4x4 homogeneous poses and the source-to-target transform
            pad = np.array([[0, 0, 0, 1]])

            homo1 = np.append(r1, t1.reshape(3, 1), 1)
            homo1 = np.append(homo1, pad, 0)

            homo2 = np.append(r2, t2.reshape(3, 1), 1)
            homo2 = np.append(homo2, pad, 0)

            src2tgt_proj = np.dot(inv(homo2), homo1)

            #pred_poses = np.array([src2tgt_proj[0,3],src2tgt_proj[1,3],src2tgt_proj[2,3],])
            # normalize images to [0, 1]
            I1 = I1 / 255.0
            I2 = I2 / 255.0

            # optimization loop over the single image pair
            for i in range(1, 100001):

                _, pred, reproject_img, tmp_total_loss, tmp_proj, tmp_scale = sess.run(
                    [
                        train_op, pred_disp, curr_proj_image, total_loss,
                        pred_poses, scale_factor
                    ],
                    feed_dict={
                        x1: I1[None, :, :, :],
                        x2: I2[None, :, :, :],
                        pred_poses: src2tgt_proj[None, :, :],
                        intrinsics: m_intric[None, :, :],
                        gt_x1_depth: z[None, :, :, :],
                        tf_points3D: vmask,
                        tf_multiply: mulmask,
                        tf_points2D: np.array(resized_points2D,
                                              dtype=np.float32)
                    })

                # z=cv2.resize(pred[0][0,:,:,0],(FLAGS.image_width,FLAGS.image_height))
                # z.astype(np.float32).tofile(FLAGS.output_dir+img_list[i].split('/')[-1]+'_z.bin')

                if i >= 0 and i % 10 == 0:
                    print('Step %s - Loss: %.2f ' % (i, tmp_total_loss))
                    print('Scale %f' % (tmp_scale[0, 0, 0]))

                # interactive inspection checkpoints (debug)
                if i == 1:

                    test = reproject_img[0]
                    test = test * 255
                    test = test.astype(np.uint8)

                    plt.imshow(test[0])
                    plt.show()
                    import pdb
                    pdb.set_trace()
                if i == 200:

                    test = reproject_img[0]
                    test = test * 255
                    test = test.astype(np.uint8)

                    plt.imshow(test[0])
                    plt.show()
                    import pdb
                    pdb.set_trace()

                #print("The %dth frame is processed"%(i))
                # dump the current depth estimate at full resolution
                if i == 500:
                    z_out = cv2.resize(1.0 / pred[0][0, :, :, 0],
                                       (FLAGS.image_width, FLAGS.image_height))
                    z_out.astype(
                        np.float32).tofile(FLAGS.output_dir +
                                           img_list[0].split('/')[-1] +
                                           '_z.bin')

                if i == 1000:

                    test = reproject_img[0]
                    test = test * 255
                    test = test.astype(np.uint8)

                    plt.imshow(test[0])
                    plt.show()
                    import pdb
                    pdb.set_trace()
0
def estimate_overall_ref_model(colmap_folder, min_track_len, min_tri_angle,
                               max_tri_angle, image_path):

    print 'Loading COLMAP data'
    scene_manager = SceneManager(colmap_folder)
    scene_manager.load_cameras()
    scene_manager.load_images()

    images = [
        'frame0859.jpg', 'frame0867.jpg', 'frame0875.jpg', 'frame0883.jpg',
        'frame0891.jpg'
    ]

    _L = np.array([])
    _r = np.array([])
    _ndotl = np.array([])

    for image_name in images:

        image_id = scene_manager.get_image_id_from_name(image_name)
        image = scene_manager.images[image_id]
        camera = scene_manager.get_camera(image.camera_id)

        # image pose
        R = util.quaternion_to_rotation_matrix(image.qvec)

        print 'Loading 3D points'
        scene_manager.load_points3D()
        scene_manager.filter_points3D(min_track_len,
                                      min_tri_angle=min_tri_angle,
                                      max_tri_angle=max_tri_angle,
                                      image_list=set([image_id]))

        points3D, points2D = scene_manager.get_points3D(image_id)
        points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]

        # need to remove redundant points
        # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
        points2D_view = np.ascontiguousarray(points2D).view(
            np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
        _, idx = np.unique(points2D_view, return_index=True)

        points2D, points3D = points2D[idx], points3D[idx]

        # further rule out any points too close to the image border (for bilinear
        # interpolation)
        mask = ((points2D[:, 0] < camera.width - 1) &
                (points2D[:, 1] < camera.height - 1))
        points2D, points3D = points2D[mask], points3D[mask]

        points2D_image = points2D.copy()  # coordinates in image space
        points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
        points2D = points2D.dot(np.linalg.inv(
            camera.get_camera_matrix()).T)[:, :2]

        print len(points3D), 'total points'

        # load image
        #image_file = scene_manager.image_path + image.name
        image_file = image_path + image.name
        im_rgb = skimage.img_as_float(
            skimage.io.imread(image_file))  # color image
        L = skimage.color.rgb2lab(im_rgb)[:, :, 0] * 0.01
        L = np.maximum(
            L, 1e-6)  # unfortunately, can't have black pixels, since we
        # divide by L
        # initial values on unit sphere
        x, y = camera.get_image_grid()

        print 'Computing nearest neighbors'
        kdtree = KDTree(points2D)
        weights, nn_idxs = kdtree.query(np.c_[x.ravel(), y.ravel()],
                                        KD_TREE_KNN)
        weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
        nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

        # turn distances into weights for the nearest neighbors
        np.exp(-weights, weights)  # in-place

        # create initial surface on unit sphere
        S0 = np.dstack((x, y, np.ones_like(x)))
        S0 /= np.linalg.norm(S0, axis=-1)[:, :, np.newaxis]

        r_fixed = np.linalg.norm(points3D, axis=-1)  # fixed 3D depths

        S = S0.copy()
        z = S0[:, :, 2]
        S, r_ratios = nearest_neighbor_warp(weights, nn_idxs, points2D_image,
                                            r_fixed,
                                            util.generate_surface(camera, z))
        z_est = np.maximum(S[:, :, 2], 1e-6)
        S = util.generate_surface(camera, z_est)
        r = np.linalg.norm(S, axis=-1)
        ndotl = util.calculate_ndotl(camera, S)
        if _L.size == 0:
            _L = L.ravel()
            _r = r.ravel()
            _ndotl = ndotl.ravel()
        else:
            _L = np.append(_L, L.ravel())
            _r = np.append(_r, r.ravel())
            _ndotl = np.append(_ndotl, ndotl.ravel())

    #
    falloff, model_params, residual = fit_reflectance_model(
        sfs.POWER_MODEL, _L, _r, _ndotl, False, 5)
    import pdb
    pdb.set_trace()
Ejemplo n.º 6
0
def run(image_name,
        image_path,
        colmap_folder,
        out_folder,
        min_track_len,
        min_tri_angle,
        max_tri_angle,
        ref_surf_name,
        max_num_points=None,
        estimate_falloff=True,
        use_image_weighted_derivatives=True,
        get_initial_warp=True):
    """Run iterative shape-from-shading for one image of a COLMAP scene.

    Loads the reconstruction, projects the filtered 3D points into the
    image, builds nearest-neighbor warping weights, then iteratively
    alternates between fitting a reflectance model and solving SFS,
    writing the final surface as a .ply and the depth map as a .bin to
    out_folder.  String arguments are accepted for the numeric/boolean
    parameters (CLI-style) and converted below.
    """

    min_track_len = int(min_track_len)
    min_tri_angle = int(min_tri_angle)
    max_tri_angle = int(max_tri_angle)

    #est_global_ref.estimate_overall_ref_model(colmap_folder,min_track_len,min_tri_angle,max_tri_angle,image_path)

    # NOTE(review): bare except silently maps any bad value to None —
    # intentional best-effort parse of an optional CLI argument
    try:
        max_num_points = int(max_num_points)
    except:
        max_num_points = None

    # string-to-bool conversion: anything except 'False'/'false' is True
    get_initial_warp = (get_initial_warp not in ('False', 'false'))

    estimate_falloff = (estimate_falloff not in ('False', 'false'))
    use_image_weighted_derivatives = (use_image_weighted_derivatives
                                      not in ('False', 'false'))

    #if not image_path.endswith('/'):
    #  image_path += '/'

    print 'Loading COLMAP data'
    scene_manager = SceneManager(colmap_folder)
    scene_manager.load_cameras()
    scene_manager.load_images()
    image_id = scene_manager.get_image_id_from_name(image_name)
    image = scene_manager.images[image_id]
    camera = scene_manager.get_camera(image.camera_id)

    # image pose
    R = util.quaternion_to_rotation_matrix(image.qvec)

    print 'Loading 3D points'
    scene_manager.load_points3D()
    scene_manager.filter_points3D(min_track_len,
                                  min_tri_angle=min_tri_angle,
                                  max_tri_angle=max_tri_angle,
                                  image_list=set([image_id]))

    points3D, points2D = scene_manager.get_points3D(image_id)
    # move points from world frame into the camera frame
    points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]

    # need to remove redundant points
    # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
    points2D_view = np.ascontiguousarray(points2D).view(
        np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
    _, idx = np.unique(points2D_view, return_index=True)

    points2D, points3D = points2D[idx], points3D[idx]

    # further rule out any points too close to the image border (for bilinear
    # interpolation)
    mask = ((points2D[:, 0] < camera.width - 1) &
            (points2D[:, 1] < camera.height - 1))
    points2D, points3D = points2D[mask], points3D[mask]

    points2D_image = points2D.copy()  # coordinates in image space
    # convert pixel coordinates to normalized camera coordinates
    points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
    points2D = points2D.dot(np.linalg.inv(camera.get_camera_matrix()).T)[:, :2]

    # optionally subsample to at most max_num_points (deterministic seed)
    if (max_num_points is not None and max_num_points > 0
            and max_num_points < len(points2D)):
        np.random.seed(0)  # fix the "random" points selected
        selected_points = np.random.choice(len(points2D), max_num_points,
                                           False)
        points2D = points2D[selected_points]
        points2D_image = points2D_image[selected_points]
        points3D = points3D[selected_points]

    print len(points3D), 'total points'

    #perturb points

    #import pdb;pdb.set_trace()
    #perturb_points=np.random.choice(len(points2D),max_num_points*0.3,False)
    #randomperturb=np.random.rand(perturb_points.size)*2
    #points3D[perturb_points,2]=points3D[perturb_points,2]*randomperturb

    #points3D[2][2]=points3D[2][2]*0.5
    #points3D[4][2]=points3D[4][2]*1.5
    #points3D[8][2]=points3D[8][2]*0.3
    #points3D[9][2]=points3D[9][2]*2.
    # load image; L channel of CIELAB scaled to [0, 1]
    #image_file = scene_manager.image_path + image.name
    image_file = image_path + image.name
    im_rgb = skimage.img_as_float(skimage.io.imread(image_file))  # color image
    L = skimage.color.rgb2lab(im_rgb)[:, :, 0] * 0.01

    #import pdb;pdb.set_trace()

    #skimage.io.imsave('test.png',L)

    L = np.maximum(L, 1e-6)  # unfortunately, can't have black pixels, since we
    # divide by L

    # initial values on unit sphere
    x, y = camera.get_image_grid()

    print 'Computing nearest neighbors'

    #import pdb;pdb.set_trace()
    # GPU/compiled nearest-neighbor computation; axes are swapped into
    # (height, width, K) layout afterward
    nn_idxs, weights = compute_nn(points2D.astype(np.float32), camera.width,
                                  camera.height, camera.fx, camera.fy,
                                  camera.cx, camera.cy)
    nn_idxs = np.swapaxes(nn_idxs, 0, 2)
    nn_idxs = np.swapaxes(nn_idxs, 0, 1)

    weights = np.swapaxes(weights, 0, 2)
    weights = np.swapaxes(weights, 0, 1)

    #kdtree = KDTree(points2D)
    #weights, nn_idxs = kdtree.query(np.c_[x.ravel(),y.ravel()], KD_TREE_KNN)
    #weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
    #nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

    # figure out pixel neighborhoods for each point
    #neighborhoods = []
    # NOTE(review): np.bool is deprecated in modern NumPy; bool / np.bool_
    # would be needed to run this on NumPy >= 1.24
    neighborhood_mask = np.zeros((camera.height, camera.width), dtype=np.bool)
    # NOTE(review): unpack order (v, u) here vs. (u, v) in the vmask loop
    # below — one of the two is likely swapped; confirm against
    # points2D_image's column convention
    for v, u in points2D_image:
        rr, cc = circle(int(u), int(v), MAX_POINT_DISTANCE,
                        (camera.height, camera.width))
        #neighborhoods.append((rr, cc))
        neighborhood_mask[rr, cc] = True

    # turn distances into weights for the nearest neighbors
    np.exp(-weights, weights)  # in-place

    # create initial surface on unit sphere
    S0 = np.dstack((x, y, np.ones_like(x)))
    S0 /= np.linalg.norm(S0, axis=-1)[:, :, np.newaxis]

    r_fixed = np.linalg.norm(points3D, axis=-1)  # fixed 3D depths

    specular_mask = (L < 1.)
    # rasterize the sparse point depths into a per-pixel mask
    vmask = np.zeros_like(S0[:, :, 2])
    for k, (u, v) in enumerate(points2D_image):
        j0, i0 = int(u), int(v)  # upper left pixel for the (u,v) coordinate
        vmask[i0, j0] = points3D[k, 2]

    # iterative SFS algorithm
    # model_type: reflectance model type
    # fit_falloff: True to fit the 1/r^m falloff parameter; False to fix it at 2
    # extra_params: extra parameters to the reflectance model fit function
    def run_iterative_sfs(out_file, model_type, fit_falloff, *extra_params):
        #if os.path.exists(out_file + '_z.bin'): # don't re-run existing results
        #  return

        # map the model-type constant to a printable name
        if model_type == sfs.LAMBERTIAN_MODEL:
            model_name = 'LAMBERTIAN_MODEL'
        elif model_type == sfs.OREN_NAYAR_MODEL:
            model_name = 'OREN_NAYAR_MODEL'
        elif model_type == sfs.PHONG_MODEL:
            model_name = 'PHONG_MODEL'
        elif model_type == sfs.COOK_TORRANCE_MODEL:
            model_name = 'COOK_TORRANCE_MODEL'
        elif model_type == sfs.POWER_MODEL:
            model_name = 'POWER_MODEL'

        if model_type == sfs.POWER_MODEL:
            print '%s_%i' % (model_name, extra_params[0])
        else:
            print model_name

        # start from the unit-sphere surface
        S = S0.copy()
        z = S0[:, :, 2]

        for iteration in xrange(MAX_NUM_ITER):
            print 'Iteration', iteration

            z_old = z

            if get_initial_warp:

                #z_gt=util.load_point_ply('C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code\\Rui\\SFS_CPU\\frame0859.jpg_gt.ply')
                # warp to 3D points
                # NOTE(review): `iteration > -1` is always true, so the
                # else-branch below is dead code
                if iteration > -1:
                    S, r_ratios = nearest_neighbor_warp(
                        weights, nn_idxs, points2D_image, r_fixed,
                        util.generate_surface(camera, z))
                    z_est = np.maximum(S[:, :, 2], 1e-6)
                    S = util.generate_surface(camera, z_est)

                else:
                    z_est = z

                S = util.generate_surface(camera, z_est)
                #util.save_sfs_ply('warp' + '.ply', S, im_rgb)
                #util.save_xyz('test.xyz',points3D);
                #z=z_est
                #break
                #z_est=z

            else:
                # load a precomputed ground-truth depth map instead of warping
                #import pdb;pdb.set_trace()
                #z_est = extract_depth_map(camera,ref_surf_name,R,image)
                z_est = np.fromfile(
                    'C:\Users\user\Documents\UNC\Research\ColonProject\code\SFS_Program_from_True\endo_evaluation\gt_surfaces\\frame0859.jpg.bin',
                    dtype=np.float32).reshape(camera.height,
                                              camera.width) / WORLD_SCALE
                z_est = z_est.astype(float)
                #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
                #    points2D_image, r_fixed, util.generate_surface(camera, z_est))
                z_est = np.maximum(z_est[:, :], 1e-6)
                #Sworld = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
                S = util.generate_surface(camera, z_est)
                #util.save_sfs_ply('test' + '.ply', S, im_rgb)
                #util.save_sfs_ply(out_file + '_warp_%i.ply' % iteration, Sworld, im_rgb)
                #import pdb;pdb.set_trace()
                # if we need to, make corrections for non-positive depths

                #S = util.generate_surface(camera, z_est)

            mask = (z_est < INIT_Z)

            # restrict model fitting to non-specular, non-dark pixels with
            # plausible depth
            specular_mask = (L < 0.8)
            dark_mask = (L > 0.1)
            _mask = np.logical_and(specular_mask, mask)
            _mask = np.logical_and(_mask, dark_mask)
            # fit reflectance model
            r = np.linalg.norm(S, axis=-1)
            ndotl = util.calculate_ndotl(camera, S)
            falloff, model_params, residual = fit_reflectance_model(
                model_type, L[_mask], r.ravel(), r[_mask], ndotl.ravel(),
                ndotl[_mask], fit_falloff, camera.width, camera.height,
                *extra_params)
            #r = np.linalg.norm(S[specular_mask], axis=-1)

            #import pdb;pdb.set_trace()
            #model_params=np.array([26.15969874,-27.674055,-12.52426,7.579855,21.9768004,24.3911142,-21.7282996,-19.850894,-11.62229,-4.837014])
            #model_params=np.array([-19.4837,-490.4796,812.4527,-426.09107,139.2602,351.8061,-388.1591,875.5013,-302.4748,-414.4384])
            #falloff = 1.2
            #ndotl = util.calculate_ndotl(camera, S)[specular_mask]
            #falloff, model_params, residual = fit_reflectance_model(model_type,
            #    L[specular_mask], r, ndotl, fit_falloff, *extra_params)
            #r = np.linalg.norm(S[neighborhood_mask], axis=-1)
            #ndotl = util.calculate_ndotl(camera, S)[neighborhood_mask]
            #falloff, model_params, residual = fit_reflectance_model(model_type,
            #    L[neighborhood_mask], r, ndotl, fit_falloff, *extra_params)

            # lambda values reflect our confidence in the current surface: 0
            # corresponds to only using SFS at a pixel, 1 corresponds to equally
            # weighting SFS and the current estimate, and larger values increasingly
            # favor using only the current estimate

            rdiff = np.abs(r_fixed - get_estimated_r(S, points2D_image))
            w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
            lambdas = (np.sum(weights * w[nn_idxs], axis=-1) /
                       np.sum(weights, axis=-1))
            lambdas = np.maximum(lambdas, 0.)  # just in case
            #
            lambdas[~mask] = 0

            #if iteration == 0: # don't use current estimated surface on first pass
            #lambdas = np.zeros_like(z)
            #else:
            #  r_ratios_postwarp = r_fixed / get_estimated_r(S, points2D_image)
            #  ratio_diff = np.abs(r_ratios_prewarp - r_ratios_postwarp)
            #  ratio_diff[ratio_diff == 0] = 1e-10 # arbitrarily high lambda
            #  feature_lambdas = 1. / ratio_diff
            #  lambdas = (np.sum(weights * feature_lambdas[nn_idxs], axis=-1) /
            #    np.sum(weights, axis=-1))

            # run SFS

            H_lut, dH_lut = compute_H_LUT(model_type, model_params,
                                          NUM_H_LUT_BINS)
            #import pdb;pdb.set_trace()
            # run SFS
            #H_lut = np.ascontiguousarray(H_lut.astype(np.float32))
            #dH_lut = np.ascontiguousarray(dH_lut.astype(np.float32))

            z = run_sfs(H_lut, dH_lut, camera, L, lambdas, z_est, model_type,
                        model_params, falloff, vmask,
                        use_image_weighted_derivatives)

            # check for convergence
            #diff = np.sum(np.abs(z_old[specular_mask] - z[specular_mask]))
            #if diff < CONVERGENCE_THRESHOLD * camera.height * camera.width:
            #  break

            # save the surface
            #S = util.generate_surface(camera, z)
            #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
            #util.save_sfs_ply(out_file + '_%i.ply' % iteration, S, im_rgb)
        else:
            # NOTE(review): for-else with the break above commented out —
            # this message currently prints after every full run
            print 'DID NOT CONVERGE'

        #import pdb;pdb.set_trace()
        # regenerate and save the final surface and depth map
        S = util.generate_surface(camera, z)
        #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)

        util.save_sfs_ply(out_file + '.ply', S, im_rgb)

        z.astype(np.float32).tofile(out_file + '_z.bin')

        # save the surface
        #S = util.generate_surface(camera, z)
        #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
        #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
        #    points2D_image, r_fixed, util.generate_surface(camera, z))
        #util.save_sfs_ply(out_file + '_warped.ply', S, im_rgb)
        #z = np.maximum(S[:,:,2], 1e-6)
        #z.astype(np.float32).tofile(out_file + '_warped_z.bin')

        #reflectance_models.save_reflectance_model(out_file + '_reflectance.txt',
        #    model_name, residual, model_params, falloff, *extra_params)

        print

    # now, actually run SFS
    if not out_folder.endswith('/'):
        out_folder += '/'

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

#  if not os.path.exists(out_folder + 'lambertian/'):
#    os.mkdir(out_folder + 'lambertian/')
#  run_iterative_sfs(out_folder + 'lambertian/' + image.name,
#      sfs.LAMBERTIAN_MODEL, estimate_falloff)
#  if not os.path.exists(out_folder + 'oren_nayar/'):
#    os.mkdir(out_folder + 'oren_nayar/')
#  run_iterative_sfs(out_folder + 'oren_nayar/' + image.name,
#      sfs.OREN_NAYAR_MODEL, estimate_falloff)

#  if not os.path.exists(out_folder + 'phong/'):
#    os.mkdir(out_folder + 'phong/')
#  run_iterative_sfs(out_folder + 'phong/' + image.name, sfs.PHONG_MODEL,
#      estimate_falloff)
#  if not os.path.exists(out_folder + 'cook_torrance/'):
#    os.mkdir(out_folder + 'cook_torrance/')
#  run_iterative_sfs(out_folder + 'cook_torrance/' + image.name,
#      sfs.COOK_TORRANCE_MODEL, estimate_falloff)

#for K in [1, 2, 3, 4, 5, 10, 20, 50, 100]:
#for K in [10, 20, 50]:
#for K in [1, 2, 3, 4, 5]:

    # K is the power-model order passed through as an extra parameter
    for K in [100]:
        #if not os.path.exists(out_folder + 'power_model_%i/' % K):
        #  os.mkdir(out_folder + 'power_model_%i/' % K)
        run_iterative_sfs(out_folder + image.name, sfs.POWER_MODEL,
                          estimate_falloff, K)
Ejemplo n.º 7
0
def run(image_name, image_path, colmap_folder, out_folder, min_track_len,
        min_tri_angle, max_tri_angle, ref_surf_name, max_num_points=None,
        estimate_falloff=True, use_image_weighted_derivatives=True,get_initial_warp=True):
  """Run iterative shape-from-shading (SFS) for a single COLMAP image.

  Pipeline: load the COLMAP cameras/images/points, filter the 3D points
  observed by `image_name`, build nearest-neighbor interpolation data over
  the image grid, then invoke the nested `run_iterative_sfs` driver with a
  power reflectance model (K=100), writing the recovered surface
  (`<name>.ply`) and depth map (`<name>_z.bin`) into `out_folder`.

  Arguments may arrive as strings (e.g. from a command line): the track
  length and triangulation-angle limits are coerced to int, and the
  boolean-like flags treat any value other than 'False'/'false' as True.
  `ref_surf_name` is currently unused (referenced only in commented-out
  code inside the nested driver).
  """



  # arguments may be passed as strings -- coerce to int
  min_track_len = int(min_track_len)
  min_tri_angle = int(min_tri_angle)
  max_tri_angle = int(max_tri_angle)

  #est_global_ref.estimate_overall_ref_model(colmap_folder,min_track_len,min_tri_angle,max_tri_angle,image_path)
  
  # NOTE(review): bare except maps any unparsable value (including None)
  # to "no point limit"
  try:
    max_num_points = int(max_num_points)
  except:
    max_num_points = None

  # string-to-bool coercion: anything except 'False'/'false' counts as True
  get_initial_warp = (get_initial_warp not in ('False', 'false'))

  estimate_falloff = (estimate_falloff not in ('False', 'false'))
  use_image_weighted_derivatives = (use_image_weighted_derivatives not in ('False', 'false'))


  #if not image_path.endswith('/'):
  #  image_path += '/'

  print 'Loading COLMAP data'
  scene_manager = SceneManager(colmap_folder)
  scene_manager.load_cameras()
  scene_manager.load_images()
  image_id = scene_manager.get_image_id_from_name(image_name)
  image = scene_manager.images[image_id]
  camera = scene_manager.get_camera(image.camera_id)

  # image pose
  R = util.quaternion_to_rotation_matrix(image.qvec)

  print 'Loading 3D points'
  scene_manager.load_points3D()
  scene_manager.filter_points3D(min_track_len,
    min_tri_angle=min_tri_angle, max_tri_angle=max_tri_angle,
    image_list=set([image_id]))

  points3D, points2D = scene_manager.get_points3D(image_id)
  points3D = points3D.dot(R.T) + image.tvec[np.newaxis,:]  # world -> camera frame

  # need to remove redundant points
  # http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
  points2D_view = np.ascontiguousarray(points2D).view(
    np.dtype((np.void, points2D.dtype.itemsize * points2D.shape[1])))
  _, idx = np.unique(points2D_view, return_index=True)

  points2D, points3D = points2D[idx], points3D[idx]

  # further rule out any points too close to the image border (for bilinear
  # interpolation)
  # NOTE(review): only the upper bounds are checked; presumably negative
  # coordinates cannot occur here -- confirm against get_points3D
  mask = (
      (points2D[:,0] < camera.width - 1) & (points2D[:,1] < camera.height - 1))
  points2D, points3D = points2D[mask], points3D[mask]

  points2D_image = points2D.copy() # coordinates in image space
  # homogenize and apply inv(K): pixel coords -> normalized camera coords
  points2D = np.hstack((points2D, np.ones((points2D.shape[0], 1))))
  points2D = points2D.dot(np.linalg.inv(camera.get_camera_matrix()).T)[:,:2]

  # optionally subsample to at most max_num_points (deterministic via seed)
  if (max_num_points is not None and max_num_points > 0 and
      max_num_points < len(points2D)):
    np.random.seed(0) # fix the "random" points selected
    selected_points = np.random.choice(len(points2D), max_num_points, False)
    points2D = points2D[selected_points]
    points2D_image = points2D_image[selected_points]
    points3D = points3D[selected_points]

  print len(points3D), 'total points'

  #perturb points

  #import pdb;pdb.set_trace()
  #perturb_points=np.random.choice(len(points2D),max_num_points*0.3,False)
  #randomperturb=np.random.rand(perturb_points.size)*2
  #points3D[perturb_points,2]=points3D[perturb_points,2]*randomperturb


  #points3D[2][2]=points3D[2][2]*0.5
  #points3D[4][2]=points3D[4][2]*1.5
  #points3D[8][2]=points3D[8][2]*0.3
  #points3D[9][2]=points3D[9][2]*2.
  # load image
  #image_file = scene_manager.image_path + image.name
  image_file = image_path + image.name
  im_rgb = skimage.img_as_float(skimage.io.imread(image_file)) # color image
  # CIELAB lightness channel, rescaled from [0, 100] to [0, 1]
  L = skimage.color.rgb2lab(im_rgb)[:,:,0] * 0.01

  #import pdb;pdb.set_trace()

  #skimage.io.imsave('test.png',L)

  L = np.maximum(L, 1e-6) # unfortunately, can't have black pixels, since we
                          # divide by L

  # initial values on unit sphere
  x, y = camera.get_image_grid()

  print 'Computing nearest neighbors'

  #import pdb;pdb.set_trace()
  nn_idxs, weights = compute_nn(points2D.astype(np.float32), camera.width,
      camera.height, camera.fx, camera.fy, camera.cx, camera.cy)
  # move the neighbor axis last -- presumably (height, width, K);
  # confirm against compute_nn's output layout
  nn_idxs=np.swapaxes(nn_idxs,0,2)
  nn_idxs=np.swapaxes(nn_idxs,0,1)

  weights=np.swapaxes(weights,0,2)
  weights=np.swapaxes(weights,0,1)  
  
  #kdtree = KDTree(points2D)
  #weights, nn_idxs = kdtree.query(np.c_[x.ravel(),y.ravel()], KD_TREE_KNN)
  #weights = weights.reshape(camera.height, camera.width, KD_TREE_KNN)
  #nn_idxs = nn_idxs.reshape(camera.height, camera.width, KD_TREE_KNN)

  # figure out pixel neighborhoods for each point
  #neighborhoods = []
  neighborhood_mask = np.zeros((camera.height, camera.width), dtype=np.bool)
  for v, u in points2D_image:
    rr, cc = circle(int(u), int(v), MAX_POINT_DISTANCE,
        (camera.height, camera.width))
    #neighborhoods.append((rr, cc))
    neighborhood_mask[rr,cc] = True

  # turn distances into weights for the nearest neighbors
  np.exp(-weights, weights) # in-place

  # create initial surface on unit sphere
  S0 = np.dstack((x, y, np.ones_like(x)))
  S0 /= np.linalg.norm(S0, axis=-1)[:,:,np.newaxis]

  r_fixed = np.linalg.norm(points3D, axis=-1) # fixed 3D depths

  # NOTE(review): this specular_mask is shadowed by the reassignment inside
  # the iteration loop below and is effectively unused
  specular_mask = (L < 1.)
  # vmask: sparse depth prior -- the known z of each 3D point stored at its
  # (truncated) pixel location, zero everywhere else
  vmask=np.zeros_like(S0[:,:,2])
  for k, (u, v) in enumerate(points2D_image):
    j0, i0 = int(u), int(v) # upper left pixel for the (u,v) coordinate
    vmask[i0,j0]=points3D[k,2]

  # iterative SFS algorithm
  # model_type: reflectance model type
  # fit_falloff: True to fit the 1/r^m falloff parameter; False to fix it at 2
  # extra_params: extra parameters to the reflectance model fit function
  def run_iterative_sfs(out_file, model_type, fit_falloff, *extra_params):
    """Alternate surface warping, reflectance-model fitting, and SFS for
    MAX_NUM_ITER iterations, then write `<out_file>.ply` and
    `<out_file>_z.bin`."""
    #if os.path.exists(out_file + '_z.bin'): # don't re-run existing results
    #  return

    if model_type == sfs.LAMBERTIAN_MODEL:
      model_name = 'LAMBERTIAN_MODEL'
    elif model_type == sfs.OREN_NAYAR_MODEL:
      model_name = 'OREN_NAYAR_MODEL'
    elif model_type == sfs.PHONG_MODEL:
      model_name = 'PHONG_MODEL'
    elif model_type == sfs.COOK_TORRANCE_MODEL:
      model_name = 'COOK_TORRANCE_MODEL'
    elif model_type == sfs.POWER_MODEL:
      model_name = 'POWER_MODEL'

    if model_type == sfs.POWER_MODEL:
      print '%s_%i' % (model_name, extra_params[0])
    else:
      print model_name

    S = S0.copy()
    z = S0[:,:,2]  # NOTE: initially a view of S0; rebound by run_sfs below

    for iteration in xrange(MAX_NUM_ITER):
      print 'Iteration', iteration

      z_old = z  # kept for the (commented-out) convergence check below

      if get_initial_warp:

      	#z_gt=util.load_point_ply('C:\\Users\\user\\Documents\\UNC\\Research\\ColonProject\\code\\Rui\\SFS_CPU\\frame0859.jpg_gt.ply')
        # warp to 3D points
        if iteration>-1:
        	S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
        	    points2D_image, r_fixed, util.generate_surface(camera, z))
        	z_est = np.maximum(S[:,:,2], 1e-6)
        	S=util.generate_surface(camera, z_est)

        else:
        	z_est=z

        S=util.generate_surface(camera, z_est)
        #util.save_sfs_ply('warp' + '.ply', S, im_rgb)
        #util.save_xyz('test.xyz',points3D);
        #z=z_est
        #break
        #z_est=z

      else:
        #import pdb;pdb.set_trace()
        #z_est = extract_depth_map(camera,ref_surf_name,R,image)
        # NOTE(review): machine-specific hard-coded ground-truth depth map;
        # this branch only works where that file exists
        z_est=np.fromfile(
          'C:\Users\user\Documents\UNC\Research\ColonProject\code\SFS_Program_from_True\endo_evaluation\gt_surfaces\\frame0859.jpg.bin', dtype=np.float32).reshape(
            camera.height, camera.width)/WORLD_SCALE
        z_est=z_est.astype(float)
        #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
        #    points2D_image, r_fixed, util.generate_surface(camera, z_est))
        z_est = np.maximum(z_est[:,:], 1e-6)
        #Sworld = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
        S = util.generate_surface(camera, z_est)
        #util.save_sfs_ply('test' + '.ply', S, im_rgb)
        #util.save_sfs_ply(out_file + '_warp_%i.ply' % iteration, Sworld, im_rgb)
        #import pdb;pdb.set_trace()
        # if we need to, make corrections for non-positive depths
      
        #S = util.generate_surface(camera, z_est)

      mask = (z_est < INIT_Z)

      # restrict the reflectance fit to pixels that are neither saturated
      # (L >= 0.8) nor too dark (L <= 0.1) and whose depth is below INIT_Z
      specular_mask=(L<0.8)
      dark_mask=(L>0.1)
      _mask=np.logical_and(specular_mask,mask)
      _mask=np.logical_and(_mask,dark_mask)
      # fit reflectance model
      r = np.linalg.norm(S, axis=-1)
      ndotl = util.calculate_ndotl(camera, S)
      falloff, model_params, residual = fit_reflectance_model(model_type,
          L[_mask],r.ravel(), r[_mask],ndotl.ravel(), ndotl[_mask], fit_falloff,camera.width,camera.height, *extra_params)
      #r = np.linalg.norm(S[specular_mask], axis=-1)

      #import pdb;pdb.set_trace()
      #model_params=np.array([26.15969874,-27.674055,-12.52426,7.579855,21.9768004,24.3911142,-21.7282996,-19.850894,-11.62229,-4.837014])
      #model_params=np.array([-19.4837,-490.4796,812.4527,-426.09107,139.2602,351.8061,-388.1591,875.5013,-302.4748,-414.4384])
      #falloff = 1.2
      #ndotl = util.calculate_ndotl(camera, S)[specular_mask]
      #falloff, model_params, residual = fit_reflectance_model(model_type,
      #    L[specular_mask], r, ndotl, fit_falloff, *extra_params)
      #r = np.linalg.norm(S[neighborhood_mask], axis=-1)
      #ndotl = util.calculate_ndotl(camera, S)[neighborhood_mask]
      #falloff, model_params, residual = fit_reflectance_model(model_type,
      #    L[neighborhood_mask], r, ndotl, fit_falloff, *extra_params)

      # lambda values reflect our confidence in the current surface: 0
      # corresponds to only using SFS at a pixel, 1 corresponds to equally
      # weighting SFS and the current estimate, and larger values increasingly
      # favor using only the current estimate

      rdiff = np.abs(r_fixed - get_estimated_r(S, points2D_image))
      w = np.log10(r_fixed) - np.log10(rdiff) - np.log10(2.)
      lambdas = (np.sum(weights * w[nn_idxs], axis=-1) /
          np.sum(weights, axis=-1))
      lambdas = np.maximum(lambdas, 0.) # just in case
#
      lambdas[~mask] = 0

      #if iteration == 0: # don't use current estimated surface on first pass
      #lambdas = np.zeros_like(z)
      #else:
      #  r_ratios_postwarp = r_fixed / get_estimated_r(S, points2D_image)
      #  ratio_diff = np.abs(r_ratios_prewarp - r_ratios_postwarp)
      #  ratio_diff[ratio_diff == 0] = 1e-10 # arbitrarily high lambda
      #  feature_lambdas = 1. / ratio_diff
      #  lambdas = (np.sum(weights * feature_lambdas[nn_idxs], axis=-1) /
      #    np.sum(weights, axis=-1))

      # run SFS

      H_lut, dH_lut = compute_H_LUT(model_type, model_params, NUM_H_LUT_BINS)
      #import pdb;pdb.set_trace()
      # run SFS
      #H_lut = np.ascontiguousarray(H_lut.astype(np.float32))
      #dH_lut = np.ascontiguousarray(dH_lut.astype(np.float32))


      z = run_sfs(H_lut,dH_lut,camera, L, lambdas, z_est, model_type, model_params, falloff,vmask,
          use_image_weighted_derivatives)

      # check for convergence
      #diff = np.sum(np.abs(z_old[specular_mask] - z[specular_mask]))
      #if diff < CONVERGENCE_THRESHOLD * camera.height * camera.width:
      #  break
      
      # save the surface
      #S = util.generate_surface(camera, z)
      #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
      #util.save_sfs_ply(out_file + '_%i.ply' % iteration, S, im_rgb)
    else:
      # NOTE(review): for-else -- since the convergence break above is
      # commented out, the loop never breaks and this always prints
      print 'DID NOT CONVERGE'

    #import pdb;pdb.set_trace()
    S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
   
    util.save_sfs_ply(out_file + '.ply', S, im_rgb)
   
    z.astype(np.float32).tofile(out_file + '_z.bin')

    # save the surface
    #S = util.generate_surface(camera, z)
    #S = (S - image.tvec[np.newaxis,np.newaxis,:]).dot(R)
    #S, r_ratios = nearest_neighbor_warp(weights, nn_idxs,
    #    points2D_image, r_fixed, util.generate_surface(camera, z))    
    #util.save_sfs_ply(out_file + '_warped.ply', S, im_rgb)
    #z = np.maximum(S[:,:,2], 1e-6)
    #z.astype(np.float32).tofile(out_file + '_warped_z.bin')

    #reflectance_models.save_reflectance_model(out_file + '_reflectance.txt',
    #    model_name, residual, model_params, falloff, *extra_params)

    print


  # now, actually run SFS
  if not out_folder.endswith('/'):
    out_folder += '/'

  if not os.path.exists(out_folder):
    os.makedirs(out_folder)

#  if not os.path.exists(out_folder + 'lambertian/'):
#    os.mkdir(out_folder + 'lambertian/')
#  run_iterative_sfs(out_folder + 'lambertian/' + image.name,
#      sfs.LAMBERTIAN_MODEL, estimate_falloff)
#  if not os.path.exists(out_folder + 'oren_nayar/'):
#    os.mkdir(out_folder + 'oren_nayar/')
#  run_iterative_sfs(out_folder + 'oren_nayar/' + image.name,
#      sfs.OREN_NAYAR_MODEL, estimate_falloff)

#  if not os.path.exists(out_folder + 'phong/'):
#    os.mkdir(out_folder + 'phong/')
#  run_iterative_sfs(out_folder + 'phong/' + image.name, sfs.PHONG_MODEL,
#      estimate_falloff)
#  if not os.path.exists(out_folder + 'cook_torrance/'):
#    os.mkdir(out_folder + 'cook_torrance/')
#  run_iterative_sfs(out_folder + 'cook_torrance/' + image.name,
#      sfs.COOK_TORRANCE_MODEL, estimate_falloff)

  #for K in [1, 2, 3, 4, 5, 10, 20, 50, 100]:
  #for K in [10, 20, 50]:
  #for K in [1, 2, 3, 4, 5]:

  # power model with K = 100 terms is the only configuration currently run
  for K in [100]:
    #if not os.path.exists(out_folder + 'power_model_%i/' % K):
    #  os.mkdir(out_folder + 'power_model_%i/' % K)
    run_iterative_sfs(out_folder + image.name,
        sfs.POWER_MODEL, estimate_falloff, K)