def affine_estimate(self, w, depth_reg=0.085, weights=None, scale=10.0,
                    scale_mean=0.0016 * 1.8 * 1.2, scale_std=1.2 * 0,
                    cap_scale=-0.00129):
    """Quick switch to allow reconstruction at unknown scale.

    Augments the learned basis with four extra components (a scaled mean
    plus constant x/y offsets) so that ``pick_e`` can solve for an affine
    camera model and an unknown global scale in one pass, then re-runs the
    fit with the scale clamped to ``cap_scale`` for any pose whose
    estimated scale exceeded the cap.

    Parameters
    ----------
    w : array
        2D joint observations, forwarded to ``pick_e``
        (exact shape defined by ``pick_e`` — see its docstring).
    depth_reg : float
        Depth regularisation weight forwarded to ``pick_e``.
    weights : array or None
        Optional per-observation weights forwarded to ``pick_e``.
        ``None`` becomes an empty ``(0, 0, 0)`` array, which ``pick_e``
        treats as "unweighted" (presumably — confirm against ``pick_e``).
    scale : float
        Multiplier applied to all but the last sigma column of the
        augmented prior, inflating the basis variances.
    scale_mean, scale_std : float
        Prior mean / std for the unknown global scale. Note the default
        ``scale_std`` evaluates to 0 (``1.2 * 0`` kept as in original).
    cap_scale : float
        Upper cap on the recovered scale; poses exceeding it are refit
        with the scale fixed to this value.

    Returns
    -------
    tuple
        ``(res, e2[:, 1:], a, r, scale)`` — residuals, the augmented basis
        minus its scaled-mean row, basis coefficients normalised by the
        recovered scale, rotations, and the per-pose scale.
    """
    # Empty array is the "no weights" sentinel accepted by pick_e.
    weights = np.zeros((0, 0, 0)) if weights is None else weights

    # Augmented sigma: 4 prior columns (scale, y, x, z) + learned sigma.
    s = np.empty((self.sigma.shape[0], self.sigma.shape[1] + 4))  # e,y,x,z
    s[:, :4] = 10 ** -5  # Tiny but makes stuff well-posed
    s[:, 0] = scale_std
    s[:, 4:] = self.sigma
    s[:, 4:-1] *= scale  # inflate all but the last learned sigma column

    # Augmented basis: row 0 = scaled mean, rows 1-3 = constant offsets.
    e2 = np.zeros((self.e.shape[0], self.e.shape[1] + 4, 3,
                   self.e.shape[3]))
    e2[:, 1, 0] = 1.0
    e2[:, 2, 1] = 1.0
    e2[:, 3, 0] = 1.0
    # This makes the least_squares problem ill posed, as X,Z are
    # interchangable
    # Hence regularisation above to speed convergence and stop blow-up
    e2[:, 0] = self.mu
    e2[:, 4:] = self.e

    t_m = np.zeros_like(self.mu)

    # First pass: jointly estimate coefficients, rotation and scale.
    res, a, r = pick_e(w, e2, t_m, self.cam, s, weights=weights,
                       interval=0.01, depth_reg=depth_reg,
                       scale_prior=scale_mean)

    scale = a[:, :, 0]
    reestimate = scale > cap_scale  # poses whose scale blew past the cap
    m = self.mu * cap_scale
    for i in range(scale.shape[0]):
        if reestimate[i].sum() > 0:
            # Refit with scale fixed: drop the scale row/column and bake
            # cap_scale into the mean.
            ehat = e2[i:i + 1, 1:]
            mhat = m[i:i + 1]
            shat = s[i:i + 1, 1:]
            # BUG FIX: the empty (0, 0, 0) placeholder cannot be
            # boolean-indexed with a nonzero-length mask (IndexError on
            # axis 0); pass it through unsliced, matching the first
            # pick_e call above.
            w_sub = (weights[reestimate[i]] if weights.size > 0
                     else weights)
            (res2, a2, r2) = pick_e(
                w[reestimate[i]], ehat, mhat, self.cam, shat,
                weights=w_sub,
                interval=0.01, depth_reg=depth_reg,
                scale_prior=scale_mean)
            # Write the capped refit back into the full-batch results.
            res[i:i + 1, reestimate[i]] = res2
            a[i:i + 1, reestimate[i], 1:] = a2
            a[i:i + 1, reestimate[i], 0] = cap_scale
            r[i:i + 1, :, reestimate[i]] = r2

    scale = a[:, :, 0]
    # Normalise the remaining coefficients by the recovered scale.
    a = a[:, :, 1:] / a[:, :, 0][:, :, np.newaxis]
    return res, e2[:, 1:], a, r, scale