Example #1
    def test_reproducibility(self):
        np.random.seed(514)
        x = special_ortho_group.rvs(3)
        expected = np.array([[0.99394515, -0.04527879, 0.10011432],
                             [-0.04821555, 0.63900322, 0.76769144],
                             [-0.09873351, -0.76787024, 0.63295101]])
        assert_array_almost_equal(x, expected)

        random_state = np.random.RandomState(seed=514)
        x = special_ortho_group.rvs(3, random_state=random_state)
        assert_array_almost_equal(x, expected)
Example #2
    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = 0.39  # ...so don't expect much precision
        np.random.seed(514)
        xs = special_ortho_group.rvs(dim, size=samples)

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        #   effectively picking off entries in the matrices of xs.
        #   These projections should all have the same distribution,
        #     establishing rotational invariance. We use the two-sided
        #     KS test to confirm this.
        #   We could instead test that angles between random vectors
        #     are uniformly distributed, but the below is sufficient.
        #   It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)
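The comments above describe the property being tested: a Haar-distributed rotation left-multiplied by any fixed rotation is still Haar-distributed, so individual matrix entries keep the same distribution. A minimal stand-alone sketch of that check (assuming only numpy and scipy), again using the two-sided KS test:

import numpy as np
from scipy.stats import special_ortho_group, ks_2samp

rng = np.random.RandomState(514)
dim, samples = 5, 1000
xs = special_ortho_group.rvs(dim, size=samples, random_state=rng)
fixed = special_ortho_group.rvs(dim, random_state=rng)
rotated = fixed @ xs  # rotate every sample by the same fixed rotation

# The (0, 0) entry should have the same distribution before and after.
p_value = ks_2samp(xs[:, 0, 0], rotated[:, 0, 0])[1]
print(p_value)  # a large p-value means no evidence of a difference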
Example #3
    def test_frozen_matrix(self):
        dim = 7
        frozen = special_ortho_group(dim)

        rvs1 = frozen.rvs(random_state=1234)
        rvs2 = special_ortho_group.rvs(dim, random_state=1234)

        assert_equal(rvs1, rvs2)
Example #4
    def test_det_and_ortho(self):
        xs = [special_ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]

        # Test that determinants are always +1
        dets = [np.linalg.det(x) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)

        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))
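For a single draw, the two defining properties of SO(n) checked above can be stated directly; a minimal sketch using nothing beyond numpy and scipy:

import numpy as np
from scipy.stats import special_ortho_group

R = special_ortho_group.rvs(4, random_state=0)
assert np.isclose(np.linalg.det(R), 1.0)   # determinant is +1
assert np.allclose(R @ R.T, np.eye(4))     # rows and columns are orthonormal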
Example #5
def test_pose_setter() -> None:
    R_cw = special_ortho_group.rvs(3)
    t_cw = np.random.rand(3)
    T_cw = np.vstack((np.column_stack((R_cw, t_cw)), np.array([0, 0, 0, 1])))
    T_wc = np.linalg.inv(T_cw)
    r_cw = cv2.Rodrigues(R_cw)[0].flatten()
    r_wc = -r_cw

    # set world to cam
    p1 = pygeometry.Pose()
    p1.set_from_world_to_cam(T_cw)
    _helper_pose_equal_to_T(p1, T_cw)

    p2 = pygeometry.Pose()
    p2.set_from_world_to_cam(R_cw, t_cw)
    _helper_pose_equal_to_T(p2, T_cw)

    p3 = pygeometry.Pose()
    p3.set_from_world_to_cam(r_cw, t_cw)
    _helper_pose_equal_to_T(p3, T_cw)

    # set cam to world
    p4 = pygeometry.Pose()
    p4.set_from_cam_to_world(T_wc)
    _helper_pose_equal_to_T(p4, T_cw)

    p5 = pygeometry.Pose()
    p5.set_from_cam_to_world(T_wc[0:3, 0:3], T_wc[0:3, 3])
    _helper_pose_equal_to_T(p5, T_cw)

    p6 = pygeometry.Pose()
    p6.set_from_cam_to_world(r_wc, T_wc[0:3, 3])
    _helper_pose_equal_to_T(p6, T_cw)

    # set rotation, translation
    p7 = pygeometry.Pose()
    p7.rotation = r_cw
    p7.translation = t_cw
    _helper_pose_equal_to_T(p7, T_cw)

    p8 = pygeometry.Pose()
    p8.set_rotation_matrix(R_cw)
    p8.translation = t_cw
    _helper_pose_equal_to_T(p8, T_cw)
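The fixture above relies on the usual inverse-pose identities: for T_cw = [R | t], the inverse transform has rotation R.T (rotation vector -r for angles below pi) and translation -R.T @ t. A small sketch of those identities, using scipy.spatial.transform in place of cv2.Rodrigues:

import numpy as np
from scipy.stats import special_ortho_group
from scipy.spatial.transform import Rotation

R_cw = special_ortho_group.rvs(3)
t_cw = np.random.rand(3)
T_cw = np.vstack((np.column_stack((R_cw, t_cw)), [0.0, 0.0, 0.0, 1.0]))
T_wc = np.linalg.inv(T_cw)

r_cw = Rotation.from_matrix(R_cw).as_rotvec()
assert np.allclose(Rotation.from_matrix(T_wc[:3, :3]).as_rotvec(), -r_cw)
assert np.allclose(T_wc[:3, 3], -R_cw.T @ t_cw)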
Example #6
def test_pose_init():
    R_cw = special_ortho_group.rvs(3)
    t_cw = np.random.rand(3)
    T_cw = np.vstack((np.column_stack((R_cw, t_cw)), np.array([0, 0, 0, 1])))
    pose = pygeometry.Pose(R_cw, t_cw)
    _helper_pose_equal_to_T(pose, T_cw)

    r_cw = cv2.Rodrigues(T_cw[0:3, 0:3])[0].flatten()
    pose2 = pygeometry.Pose(r_cw, t_cw)
    _helper_pose_equal_to_T(pose2, T_cw)
    _heper_poses_equal(pose, pose2)

    # Test default init
    pose3 = pygeometry.Pose()
    _helper_pose_equal_to_T(pose3, np.eye(4))
    pose4 = pygeometry.Pose(T_cw[0:3, 0:3])
    _helper_pose_equal_to_T(pose4, np.vstack((np.column_stack((T_cw[0:3, 0:3], np.zeros((3,1)))), np.array([0, 0, 0, 1]))))
    pose5 = pygeometry.Pose(r_cw)
    _helper_pose_equal_to_T(pose5, np.vstack((np.column_stack((T_cw[0:3, 0:3], np.zeros((3,1)))), np.array([0, 0, 0, 1]))))
Example #7
  def create_rotated_abst_atoms_in_mol(
    self, size, id_start, mol, shift_x, shift_y, shift_z):

    initial = np.array([
      [size, 0.0, 0.0], [-size, 0.0, 0.0],
      [0.0, size, 0.0], [0.0, -size, 0.0],
      [0.0, 0.0, size], [0.0, 0.0, -size]])

    # create a random rotation matrix
    rot_mat = special_ortho_group.rvs(3)

    # rotate
    rotated = np.dot(rot_mat, initial.T).T

    return [{
      "id": id_start+i, "mol": mol, "mass": 12.011,
      "xu": rotated[i,0] + shift_x,
      "yu": rotated[i,1] + shift_y,
      "zu": rotated[i,2] + shift_z} for i in range(6)]
Example #8
    def __init__(self, dim, alpha_label, theta0 = None, local_alpha=True,
                 keepItems = False, max_rot_dim = None, ctw=False):

            self.ctw = ctw
        
            self.max_rot_dim = max_rot_dim
            self.local_alpha = local_alpha

            if max_rot_dim is not None and max_rot_dim > 0:
                #random rotation matrix
                print("generating random rotation matrix...")
                if dim > max_rot_dim:
                    print("using "+ str(max_rot_dim) + "random axes")
                    rot_axes = np.random.choice(range(dim),max_rot_dim,replace=False)
                    self.rot_mask = [True if i in rot_axes else False for i in range(dim)]  
                    rot_dim = max_rot_dim
                else:
                    self.rot_mask = None
                    rot_dim = dim
    
                self.R =  special_ortho_group.rvs(rot_dim)
            else:
                self.R = None
                print("No rotation.")


            self.theta0 = theta0

            self.dim = dim
            self.alpha_label =  alpha_label
            
            #set this value to avoid learning global proportion
            if self.theta0 is not None:
                self.P0Dist = [lp.LogWeightProb(p) for p in theta0]
            else:
                self.P0Dist = None

            self.n = 0

            #keep items in each node for post-hoc analysis: high memory consumption
            self.keepItems = keepItems

            self.root = SeqNode(depth=0,tree=self)
Example #9
    def generate_transform(self):
        """Generate a random SE3 transformation (3, 4) """

        if self._random_mag:
            attenuation = np.random.random()
            rot_mag, trans_mag = attenuation * self._rot_mag, attenuation * self._trans_mag
        else:
            rot_mag, trans_mag = self._rot_mag, self._trans_mag

        # Generate rotation
        rand_rot = special_ortho_group.rvs(3)
        axis_angle = Rotation.as_rotvec(Rotation.from_dcm(rand_rot))
        axis_angle *= rot_mag / 180.0
        rand_rot = Rotation.from_rotvec(axis_angle).as_dcm()

        # Generate translation
        rand_trans = np.random.uniform(-trans_mag, trans_mag, 3)
        rand_SE3 = np.concatenate((rand_rot, rand_trans[:, None]), axis=1).astype(np.float32)

        return rand_SE3
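The rotation step above works because the norm of a rotation vector is the rotation angle (at most pi in the canonical representation), so rescaling the vector by rot_mag / 180 caps the angle at rot_mag degrees. A sketch of that bound, assuming a recent scipy where from_matrix/as_matrix replace the deprecated from_dcm/as_dcm used above:

import numpy as np
from scipy.stats import special_ortho_group
from scipy.spatial.transform import Rotation

rot_mag = 45.0  # degrees
axis_angle = Rotation.from_matrix(special_ortho_group.rvs(3)).as_rotvec()
axis_angle *= rot_mag / 180.0  # original angle <= pi, so new angle <= deg2rad(rot_mag)
angle_deg = np.rad2deg(np.linalg.norm(axis_angle))
assert angle_deg <= rot_mag + 1e-9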
Example #10
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(input_tensor=y * yp) / 2

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.nelder_mead_minimize(
        quadratic,
        initial_vertex=start,
        func_tolerance=1e-12,
        batch_evaluate_objective=False))
    self.assertTrue(results.converged)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #11
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))

    @_make_val_and_grad_fn
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(y * yp) / 2

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.lbfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8))
    self.assertTrue(results.converged)
    self.assertLessEqual(_norm(results.objective_gradient), 1e-8)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #12
def test_rot_to_quat():
    from scipy.spatial.transform import Rotation as R
    import numpy as np
    from scipy.stats import special_ortho_group
    from rotations import RearangeQuat
    import time
    bs = 1000
    re_q = RearangeQuat(bs)
    mat = special_ortho_group.rvs(dim=3, size=bs)

    quat = R.from_matrix(mat).as_quat()
    q_test = rot_to_quat(torch.tensor(mat), conv='wxyz')

    print(quat, '\n \n ', q_test)
    m = q_test[:, 0] > 0

    mat2 = R.from_quat(q_test.numpy()).as_matrix()

    print("Fiff",
          torch.sum(torch.norm(torch.tensor(mat - mat2), dim=(1, 2)), dim=0))
Example #13
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(y * yp) / 2

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.nelder_mead_minimize(
        quadratic,
        initial_vertex=start,
        func_tolerance=1e-12,
        batch_evaluate_objective=False))
    self.assertTrue(results.converged)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #14
    def __init__(self,
        dims_in,
        dims_c=[],
        subnet_constructor=None,
        clamp=2.,
        act_norm=1.,
        act_norm_type='SOFTPLUS',
        permute_soft=False
    ):

        super().__init__(dims_in, dims_c=dims_c, subnet_constructor=subnet_constructor, clamp=clamp)

        if act_norm_type == 'SIGMOID':
            act_norm = np.log(act_norm)
            self.actnorm_activation = (lambda a: 10 * torch.sigmoid(a - 2.))
        elif act_norm_type == 'SOFTPLUS':
            act_norm = 10. * act_norm
            self.softplus = nn.Softplus(beta=0.5)
            self.actnorm_activation = (lambda a: 0.1 * self.softplus(a))
        elif act_norm_type == 'EXP':
            act_norm = np.log(act_norm)
            self.actnorm_activation = (lambda a: torch.exp(a))
        else:
            raise ValueError("act_norm_type must be one of 'SIGMOID', 'SOFTPLUS' or 'EXP'")

        assert act_norm > 0., "please, this is not allowed. don't do it. take it... and go."
        channels = self.in_channels

        self.act_norm = nn.Parameter(torch.ones(1, channels, 1, 1) * float(act_norm))
        self.act_offset = nn.Parameter(torch.zeros(1, channels, 1, 1))

        if permute_soft:
            w = special_ortho_group.rvs(channels)
        else:
            w = np.zeros((channels,channels))
            for i,j in enumerate(np.random.permutation(channels)):
                w[i,j] = 1.
        w_inv = w.T

        self.w = nn.Parameter(torch.FloatTensor(w).view(channels, channels, 1, 1), requires_grad=False)
        self.w_inv = nn.Parameter(torch.FloatTensor(w_inv).view(channels, channels, 1, 1), requires_grad=False)
Example #15
    def make_filament(self, position):
        # cylindrical coordinates
        [r, t, z] = position[:3]
        # Bam !
        radius_c = self.get_radius_c(r)  # radius of curvature
        length = self.get_length(r)  # length of filament
        n = int(length / self.step)
        filament = np.zeros((n, 3))
        thetas = np.arange(n) * (self.step / radius_c)

        # Creation and centering
        filament[:, 0] = radius_c * (np.cos(thetas) - np.mean(np.cos(thetas)))
        filament[:, 1] = radius_c * (np.sin(thetas) - np.mean(np.sin(thetas)))
        # rotation, using a scipy package to have a uniform 3D rotation
        if self.random_rotation:
            filament[:, :] = np.dot(filament, special_ortho_group.rvs(3))
        # Translation
        filament[:, 0] += self.Rmax * r * np.cos(t)
        filament[:, 1] += self.Rmax * r * np.sin(t)
        filament[:, 2] += z * self.Zmax
        return filament
Example #16
def rotate_point_cloud(batch_data):
    """ Randomly rotate the point clouds to augument the dataset
        rotation is per shape based along up direction
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, sinval, 0], [-sinval, cosval, 0],
                                    [0, 0, 1]])
        shape_pc = batch_data[k, ...]
        # Mia
        from scipy.stats import special_ortho_group
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)),
                                      special_ortho_group.rvs(3))
    return rotated_data
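As the docstring notes, each shape gets its own rotation; a vectorized sketch of the same augmentation (assumption: batch_data is a B x N x 3 array with B > 1) replaces the Python loop with a batched matrix product:

import numpy as np
from scipy.stats import special_ortho_group

def rotate_point_cloud_vectorized(batch_data):
    rots = special_ortho_group.rvs(3, size=batch_data.shape[0])  # B x 3 x 3
    # result[b, n, :] = batch_data[b, n, :] @ rots[b]
    return np.einsum('bnd,bde->bne', batch_data, rots).astype(np.float32)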
Example #17
    def generate_transform(self):
        """Generate a random SE3 transformation (3, 4) """

        if self._random_mag:
            rot_mag, trans_mag = np.random.uniform() * self._rot_mag, np.random.uniform() * self._trans_mag
        else:
            rot_mag, trans_mag = self._rot_mag, self._trans_mag

        # Generate rotation
        rand_rot = special_ortho_group.rvs(3)
        axis_angle = Rotation.as_rotvec(Rotation.from_dcm(rand_rot))
        axis_angle /= np.linalg.norm(axis_angle)
        axis_angle *= np.deg2rad(rot_mag)
        rand_rot = Rotation.from_rotvec(axis_angle).as_dcm()

        # Generate translation
        rand_trans = uniform_2_sphere()
        rand_trans *= np.random.uniform(high=trans_mag)
        rand_SE3 = np.concatenate((rand_rot, rand_trans[:, None]), axis=1).astype(np.float32)

        return rand_SE3
Example #18
    def __init__(self,
                 Q=0.01,
                 D=32,
                 lower=-15,
                 upper=15,
                 random_rot=True,
                 randseed=0):

        self.Q = Q
        self.D = D
        self.lower = lower
        self.upper = upper
        self.randseed = randseed
        np.random.seed(randseed)

        self.A = special_ortho_group.rvs(D)
        self.random_rot = random_rot

        ir = np.array((np.full(self.D,
                               self.lower), np.full(self.D, self.upper))).T
        self.bound = ir
Example #19
def rotate_shape(_scores, shape: ndarray = None, max_iter=20, tol=1e-10):
    "rotate scores to maximize their phase-shift to 90°"

    def objective(x, _scores: ndarray, shape: ndarray, k: int):
        from scipy.signal import hilbert

        R = x.reshape(k, k)
        scores = R.dot(_scores.T)
        select = np.triu(np.ones((k + 1, k + 1)), 1).flatten() == 1

        rvals = np.abs(np.corrcoef(scores, shape))
        rvals = [v for s, v in zip(select, rvals.flatten()) if s]
        best = np.max(np.abs(rvals))
        # the closer to one, the better, but we need to invert for minimize
        cost = 1 - best
        # print("Shapecost: ", cost)
        return cost

    if shape is None:
        from scarpa.generate.shapes import sinus

        print("Defaulting to sinus")
        shape = sinus(len(_scores))
    p, k = _scores.shape
    initial = special_ortho_group.rvs(k).flatten()
    cons = [{"type": "eq", "fun": constrain_identity, "args": [k]}]
    bnds = [(-1.01, 1.01)] * len(initial)
    solution = minimize(
        objective,
        args=(_scores, shape, k),
        x0=initial,
        method="SLSQP",
        bounds=bnds,
        constraints=cons,
    )

    R = solution.x.reshape(k, k)
    print("Shape Rotation finished after iteration", solution.nit, "with")
    pprint(R)
    return R
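constrain_identity is defined elsewhere in that project; a hypothetical form consistent with the SLSQP equality constraint above (a guess, not the author's implementation) would return zero exactly when the flattened matrix is orthogonal:

import numpy as np

def constrain_identity(x, k):
    # hypothetical helper: squared deviation of R.T @ R from the identity
    R = x.reshape(k, k)
    return np.sum((R.T @ R - np.eye(k)) ** 2)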
Example #20
 def __init__(
     self,
     name: str,
     param_names: List[str],
     noise_sd: float = 0.0,
     lower_is_better: Optional[bool] = None,
 ) -> None:
     super().__init__(
         name=name,
         param_names=param_names,
         noise_sd=noise_sd,
         lower_is_better=lower_is_better,
     )
     # Set the random basis
     try:
         with open('data/random_subspace_1000x6.json', 'r') as fin:
             self.random_basis = np.array(json.load(fin))
     except IOError:
         np.random.seed(1000)
         self.random_basis = special_ortho_group.rvs(1000)[:6, :]
         with open('data/random_subspace_1000x6.json', 'w') as fout:
             json.dump(self.random_basis.tolist(), fout)
Example #21
  def test_quadratic_with_skew(self):
    """Can minimize a general quadratic function."""
    dim = 3
    np.random.seed(26535)
    minimum = np.random.randn(dim)
    principal_values = np.diag(np.exp(np.random.randn(dim)))
    rotation = special_ortho_group.rvs(dim)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      value = tf.reduce_sum(input_tensor=y * yp) / 2
      return value

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.nelder_mead_minimize(
        quadratic,
        initial_vertex=start,
        func_tolerance=1e-12,
        batch_evaluate_objective=False))
    self.assertTrue(results.converged)
    self.assertArrayNear(results.position, minimum, 1e-6)
Example #22
  def test_quadratic_with_skew(self):
    """Can minimize a general quadratic function."""
    dim = 3
    np.random.seed(26535)
    minimum = np.random.randn(dim)
    principal_values = np.diag(np.exp(np.random.randn(dim)))
    rotation = special_ortho_group.rvs(dim)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      value = tf.reduce_sum(y * yp) / 2
      return value

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.nelder_mead_minimize(
        quadratic,
        initial_vertex=start,
        func_tolerance=1e-12,
        batch_evaluate_objective=False))
    self.assertTrue(results.converged)
    self.assertArrayNear(results.position, minimum, 1e-6)
Example #23
def rotate_hilbert(_scores, max_iter=20, tol=1e-6):
    "rotate scores to maximize their phase-shift to 90°"

    def objective(x, _scores: ndarray, k: int):
        from scipy.signal import hilbert

        R = x.reshape(k, k)
        scores = R.dot(_scores.T)
        select = np.triu(np.ones((k + 1, k + 1)), 1).flatten() == 1

        best = []
        for six, score in enumerate(scores):
            deriv = np.imag(hilbert(score))
            rvals = np.abs(np.corrcoef(scores, deriv))
            rvals = [v for s, v in zip(select, rvals.flatten()) if s]
            _best = np.max(np.abs(rvals))
            best.append(_best)
        # the closer to one, the better, but we need to invert for minimize
        cost = len(best) - np.sum(best)
        return cost

    p, k = _scores.shape
    initial = special_ortho_group.rvs(k).flatten()
    cons = [{"type": "eq", "fun": constrain_identity, "args": [k]}]
    bnds = [(-1.01, 1.01)] * len(initial)
    solution = minimize(
        objective,
        args=(_scores, k),
        x0=initial,
        method="SLSQP",
        bounds=bnds,
        constraints=cons,
    )

    R = solution.x.reshape(k, k)
    print("Hilbert Rotation finished after iteration", solution.nit, "with")
    pprint(R)
    return R
Example #24
    def test_rmsd(self):

        x0 = onp.array([
            [1.0, 0.2, 3.3], # H
            [-0.6,-1.1,-0.9],# C
            [3.4, 5.5, 0.2], # H
            [3.6, 5.6, 0.6], # H
        ], dtype=onp.float64)

        x1 = onp.array([
            [1.0, 0.2, 3.3], # H
            [-0.6,-1.1,-0.9],# C
            [3.4, 5.5, 0.2], # H
            [3.6, 5.6, 0.6], # H
        ], dtype=onp.float64)

        onp.testing.assert_almost_equal(rmsd.opt_rot_rmsd(x0, x1), 0)

        # test random translation
        for _ in range(10):
            offset = onp.random.rand(1, 3)*10
            onp.testing.assert_almost_equal(rmsd.opt_rot_rmsd(x0+offset, x1), 0)
            onp.testing.assert_almost_equal(rmsd.opt_rot_rmsd(x0, x1+offset), 0)

        # generate random rotation matrix
        for _ in range(10):
            rot_x = special_ortho_group.rvs(3)
            rot = rot_x  # already a proper rotation (orthogonal, det = +1)
            onp.testing.assert_almost_equal(rmsd.opt_rot_rmsd(onp.dot(x0, rot), x1), 0)
            onp.testing.assert_almost_equal(rmsd.opt_rot_rmsd(x0, onp.dot(x1, rot)), 0)

        assert rmsd.opt_rot_rmsd(x0 + onp.random.rand(x0.shape[0],3)*10, x1) > 1e-1

        check_grads(rmsd.opt_rot_rmsd, (x0 + onp.random.rand(x0.shape[0],3)*10, x1), order=1, eps=1e-5)
        check_grads(rmsd.opt_rot_rmsd, (x0 + onp.random.rand(x0.shape[0],3)*10, x1), order=2, eps=1e-5)

        check_grads(rmsd.opt_rot_rmsd, (x0, x1 + onp.random.rand(x0.shape[0],3)*10), order=1, eps=1e-5)
        check_grads(rmsd.opt_rot_rmsd, (x0, x1 + onp.random.rand(x0.shape[0],3)*10), order=2, eps=1e-5)
Example #25
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))

    @_make_val_and_grad_fn
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(y * yp) / 2

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8))
    self.assertTrue(results.converged)
    final_gradient = results.objective_gradient
    final_gradient_norm = _norm(final_gradient)
    print (final_gradient_norm)
    self.assertTrue(final_gradient_norm <= 1e-8)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #26
  def test_quadratic_with_skew(self):
    """Can minimize a general quadratic function."""
    dim = 3
    np.random.seed(26535)
    minimum = np.random.randn(dim)
    principal_values = np.diag(np.exp(np.random.randn(dim)))
    rotation = special_ortho_group.rvs(dim)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))

    @_make_val_and_grad_fn
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(y * yp) / 2

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8))
    self.assertTrue(results.converged)
    final_gradient = results.objective_gradient
    final_gradient_norm = _norm(final_gradient)
    self.assertLessEqual(final_gradient_norm, 1e-8)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #27
    def _generate_random_camera_data(self, n):
        random_intrinsic_matrices = np.random.rand(n, 3, 3)
        random_intrinsic_matrices[:, 0, 1] = 0
        random_intrinsic_matrices[:, 1, 0] = 0
        random_intrinsic_matrices[:, 2, :] = np.array([0, 0, 1])
        random_intrinsic_matrices *= 1000

        random_intrinsic_matrices_inv = np.linalg.inv(
            random_intrinsic_matrices)

        random_extrinsic_matrices = np.random.rand(n, 3, 4) * 2
        random_translation_vectors = random_extrinsic_matrices[:, :, 3:4]
        random_rotation_matrices = special_ortho_group.rvs(dim=3, size=n)
        random_extrinsic_matrices[:, :, 0:3] = random_rotation_matrices

        random_pose_matrices = np.block([[
            random_rotation_matrices.transpose((0, 2, 1)),
            -1 * random_rotation_matrices.transpose(
                (0, 2, 1)) @ random_translation_vectors
        ]])
        random_camera_centers = random_pose_matrices[:, :, 3:4]

        return random_intrinsic_matrices, random_intrinsic_matrices_inv, random_extrinsic_matrices, random_pose_matrices, random_camera_centers
Example #28
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      value = tf.reduce_sum(y * yp) / 2
      return value, tf.gradients(value, x)[0]

    with self.test_session() as session:
      start = tf.ones_like(minimum)
      results = session.run(tfp.optimizer.bfgs_minimize(
          quadratic, initial_position=start, tolerance=1e-8))
      self.assertTrue(results.converged)
      final_gradient = results.objective_gradient
      final_gradient_norm = np.sqrt(np.sum(final_gradient * final_gradient))
      print (final_gradient_norm)
      self.assertTrue(final_gradient_norm <= 1e-8)
      self.assertArrayNear(results.position, minimum, 1e-5)
Example #29
def generate_synthetic(n, sigma):
    T = np.zeros((n, 4, 4))
    X = so.rvs(dim=3, size=n)
    T[:, :3, :3] = X
    # u, sigma, v = np.linalg.svd(T[0])
    T[:, :3, 3] = np.random.randn(n, 3)
    T[0, :3, 3] = 0.0
    T[:, 3, 3] = 1
    edges = []
    for i in range(n):
        for j in range(n):
            if i <= j:
                continue
            Tij = T[j].dot(inverse(T[i]))
            Rij, tij = __decompose__(Tij)
            Rij = Rij + np.random.randn(3, 3) * sigma
            Rij = project_so(Rij)
            tij = tij + np.random.randn(3) * sigma
            Tij = __pack__(Rij, tij)
            edge = {'src': i, 'tgt': j, 'R': Rij, 't': tij, 'weight': 1.0}
            edges.append(edge)
    edges = np.array(edges)
    return n, edges, T
Example #30
  def test_quadratic_with_strong_skew(self):
    """Can minimize a strongly skewed quadratic function."""
    np.random.seed(89793)
    minimum = np.random.randn(3)
    principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
    rotation = special_ortho_group.rvs(3)
    hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
    def quadratic(x):
      y = x - minimum
      yp = tf.tensordot(hessian, y, axes=[1, 0])
      return tf.reduce_sum(y * yp) / 2

    def objective_func(population):
      return tf.map_fn(quadratic, population)

    start = tf.ones_like(minimum)
    results = self.evaluate(tfp.optimizer.differential_evolution_minimize(
        objective_func,
        initial_position=start,
        func_tolerance=1e-12,
        max_iterations=150,
        seed=3321))
    self.assertTrue(results.converged)
    self.assertArrayNear(results.position, minimum, 1e-5)
Example #31
def gen_test_data():
  """Generate the new (modified) test data."""
  # Create output directory.
  os.makedirs(FLAGS.output_directory, exist_ok=True)

  # Get all test point cloud files in the original dataset.
  input_test_files = glob.glob(FLAGS.input_test_files)

  for in_file in input_test_files:
    out_file_prefix = pathlib.Path(in_file).stem
    pts = np.loadtxt(in_file)  # N x 3
    num_pts_to_keep = pts.shape[0] // 2
    pts = pts[:num_pts_to_keep, :]  # N//2 x 3.

    for k in range(FLAGS.num_rotations_per_file):
      if FLAGS.random_rotation_axang:
        r = utils.random_rotation_benchmark_np(1)
        r = r[0]
      else:
        r = special_ortho_group.rvs(3)
      joined = np.float32(np.concatenate((r, pts), axis=0))  # (N//2+3) x 3.
      out_file = os.path.join(
          FLAGS.output_directory, '%s_r%03d.pts'%(out_file_prefix, k))
      np.savetxt(out_file, joined)
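Each output file stacks the 3 x 3 rotation on top of the halved point set, so a reader of those files can split them back apart; a small sketch (the file name here is only a placeholder):

import numpy as np

data = np.loadtxt('points_r000.pts')  # placeholder name for one generated file
r, pts = data[:3, :], data[3:, :]     # first three rows: rotation; rest: points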
Example #32
    def load_dataset(self):
        shape_names = [
            shape_name for shape_name in os.listdir(self.shape_folder)
            if '.obj' in shape_name
        ]
        meshes = []
        for shape_name in shape_names:
            shape_path = os.path.join(self.shape_folder, shape_name)
            shape_mesh = trimesh.load(shape_path)
            assert shape_mesh.is_watertight, 'mesh at {} should be watertight'.format(
                shape_path)
            meshes.append(shape_mesh)

        objverts3d = []
        objfaces = []
        for sample_idx in range(self.size):
            mesh = random.choice(meshes)
            vertices = np.array(mesh.vertices)
            objfaces.append(np.array(mesh.faces))
            rot_mat = special_ortho_group.rvs(3)
            vertices = rot_mat.dot(vertices.transpose()).transpose()
            objverts3d.append(vertices)
        self.objverts3d = objverts3d
        self.objfaces = objfaces
Example #33
            q[:, 0] = q[:, 3]
            q[:, 3] = q[:, 2]
            q[:, 2] = q[:, 1]
            q[:, 1] = self.mem

        elif input_format == 'wxyz':
            self.mem = q[:, 0].clone()

            q[:, 0] = q[:, 1]
            q[:, 1] = q[:, 2]
            q[:, 2] = q[:, 3]
            q[:, 3] = self.mem
        return q


if __name__ == "__main__":
    bs = 10
    re_q = RearangeQuat(bs)
    from scipy.spatial.transform import Rotation as R
    from scipy.stats import special_ortho_group
    mat = special_ortho_group.rvs(dim=3, size=bs)
    quat = R.from_matrix(mat).as_quat()

    q = torch.from_numpy(quat)
    print('Input', q)
    re_q(q, input_format='xyzw')
    print('Output', q)
    re_q(q, input_format='wxyz')
    print('Same as Input', q)
Example #34
def test_dcm_calculation_pipeline():
    dcm = special_ortho_group.rvs(3, size=10, random_state=0)
    assert_array_almost_equal(Rotation.from_dcm(dcm).as_dcm(), dcm)
Example #35
def test_matrix_calculation_pipeline():
    mat = special_ortho_group.rvs(3, size=10, random_state=0)
    assert_array_almost_equal(Rotation.from_matrix(mat).as_matrix(), mat)
Example #36
def test_dcm_calculation_pipeline():
    dcm = special_ortho_group.rvs(3, size=10, random_state=0)
    assert_array_almost_equal(Rotation.from_dcm(dcm).as_dcm(), dcm)
Example #37
def flow_forward(input_dim: int, act_func_pair: tuple = (None, None), batch_norm: bool = False):
    chunk = {}
    log_det_J = 0

    chunk['input_dim'] = input_dim
    _ph = C.placeholder(input_dim, name='place_holder')
    _out = _ph

    if batch_norm:
        # _bn = C.layers.BatchNormalization(name='batch_norm')(_ph)
        # chunk['scale'] = _bn.parameters[0]
        # chunk['bias'] = _bn.parameters[1]

        chunk['mu'] = C.Constant(np.zeros(shape=input_dim))
        chunk['var'] = C.Constant(np.ones(shape=input_dim))

        _eps = C.Constant(1e-7)
        _mu = C.reduce_mean(_ph, axis=C.Axis.default_batch_axis())
        _var = C.reduce_mean(C.square(_ph-_mu), axis=C.Axis.default_batch_axis())

        chunk['muB'] = _mu
        chunk['varB'] = _var

        # _bn = (_ph-chunk['mu'])/C.sqrt(chunk['var']+_eps)
        _bn = C.sqrt(chunk['var']+_eps)*_ph + chunk['mu']
        _ph = _bn

        log_det_J += -0.5*C.reduce_sum(C.log((_var+_eps)))
        # log_det_J += C.reduce_sum(C.log())

    chunk['W_rot_mat'] = _W = C.parameter((input_dim, input_dim))
    _W.value = random_rotation_matrix = special_ortho_group.rvs(input_dim)
    # _W.value = np.roll(np.eye(input_dim),input_dim//2,axis=0)
    _out = _ph@_W
    log_det_J += C.log(C.abs(C.det(_W))) # or # log_det_J += C.slogdet(_W)[1]
    
    _half_dim = input_dim//2
    _x1 = _out[:_half_dim]
    _x2 = _out[_half_dim:]

    _log_s_func, _t_func = act_func_pair
    if _log_s_func is None: # basic network
        _log_s_func = C.layers.Sequential([
            C.layers.Dense(256, C.leaky_relu),
            C.layers.Dense(256, C.leaky_relu),
            C.layers.Dense(_half_dim, C.tanh),
        ])#(C.placeholder(input_dim, name='place_holder'))
    if _t_func is None: # basic network
        _t_func = C.layers.Sequential([
            C.layers.Dense(256, C.leaky_relu),
            C.layers.Dense(256, C.leaky_relu),
            C.layers.Dense(_half_dim),
        ])#(C.placeholder(input_dim, name='place_holder'))

    chunk['log_s_func'] = _log_s_func
    chunk['t_func'] = _t_func

    _log_s, _t = _log_s_func(_x2), _t_func(_x2)

    _s = C.exp(_log_s)

    _y1 = _s*_x1 + _t
    _y2 = _x2

    _Y = C.splice(_y1, _y2)
    chunk['output'] = _Y

    log_det_J += C.reduce_sum(_log_s)

    return _Y, log_det_J, chunk
Example #38
File: fitobj.py  Project: hassony2/epicviz
def fitobj2mask(
    masks,
    bboxes,
    obj_paths,
    z_off=0.5,
    radius=0.1,
    faces_per_pixel=1,
    lr=0.01,
    loss_type="l2",
    iters=100,
    viz_step=1,
    save_folder="tmp/",
    viz_rows=12,
    crop_box=True,
    crop_size=(200, 200),
    rot_nb=1,
):
    # Initialize logging info
    opts = {
        "z_off": z_off,
        "loss_type": loss_type,
        "iters": iters,
        "radius": radius,
        "lr": lr,
        "obj_paths": obj_paths,
        "faces_per_pix": faces_per_pixel,
    }
    results = {"opts": opts}
    save_folder = Path(save_folder)
    print(f"Saving to {save_folder}")
    metrics = defaultdict(list)

    batch_size = len(obj_paths)
    # Load normalized object
    batch_faces = []
    batch_verts = []
    for obj_path in obj_paths:
        verts_loc, faces_idx, _ = py3dload_obj(obj_path)
        faces = faces_idx.verts_idx
        batch_faces.append(faces.cuda())

        verts = normalize.normalize_verts(verts_loc, radius).cuda()
        batch_verts.append(verts)
    batch_verts = torch.stack(batch_verts)
    batch_faces = torch.stack(batch_faces)

    # Dummy intrinsic camera
    height, width = masks[0].shape
    focal = min(masks[0].shape)
    camintr = (
        torch.Tensor(
            [[focal, 0, width // 2], [0, focal, height // 2], [0, 0, 1]]
        )
        .cuda()
        .unsqueeze(0)
        .repeat(batch_size, 1, 1)
    )

    if crop_box:
        adaptive_loss = AdaptiveLossFunction(
            num_dims=crop_size[0] * crop_size[1],
            float_dtype=np.float32,
            device="cuda:0",
        )
    else:
        adaptive_loss = AdaptiveLossFunction(
            num_dims=height * width, float_dtype=np.float32, device="cuda:0"
        )
    # Prepare rigid parameters
    if rot_nb > 1:
        rot_mats = [special_ortho_group.rvs(3) for _ in range(rot_nb)]
        rot_vecs = torch.Tensor(
            [np.linalg.svd(rot_mat)[0][:2].reshape(-1) for rot_mat in rot_mats]
        )
        rot_vec = rot_vecs.repeat(batch_size, 1).cuda()
        # Ordering b1 rot1, b1 rot2, ..., b2 rot1, ...
    else:
        rot_vec = torch.Tensor(
            [[1, 0, 0, 0, 1, 0] for _ in range(batch_size)]
        ).cuda()

    bboxes_tight = torch.stack(bboxes)
    # trans = ops3d.trans_init_from_boxes(bboxes, camintr, (z_off, z_off)).cuda()
    trans = ops3d.trans_init_from_boxes_autodepth(
        bboxes_tight, camintr, batch_verts, z_guess=z_off
    ).cuda()
    # Repeat to match rots
    trans = repeatdim(trans, rot_nb, 1)
    bboxes = boxutils.preprocess_boxes(bboxes_tight, padding=10, squarify=True)
    if crop_box:
        camintr_crop = camutils.get_K_crop_resize(camintr, bboxes, crop_size)
        camintr_crop = repeatdim(camintr_crop, rot_nb, 1)

    trans.requires_grad = True
    rot_vec.requires_grad = True
    optim_params = [rot_vec, trans]
    if "adapt" in loss_type:
        optim_params = optim_params + list(adaptive_loss.parameters())
    optimizer = torch.optim.Adam(optim_params, lr=lr)

    ref_masks = torch.stack(masks).cuda()
    if crop_box:
        ref_masks = cropping.crops(ref_masks.float(), bboxes, crop_size)[:, 0]

    # Prepare reference mask
    if "dtf" in loss_type:
        target_masks = torch.stack(
            [torch.Tensor(dtf.distance_transform(mask)) for mask in ref_masks]
        ).cuda()
    else:
        target_masks = ref_masks
    ref_masks = repeatdim(ref_masks, rot_nb, 1)
    target_masks = repeatdim(target_masks, rot_nb, 1)
    batch_verts = repeatdim(batch_verts, rot_nb, 1)
    batch_faces = repeatdim(batch_faces, rot_nb, 1)

    col_nb = 5
    fig_res = 1.5
    # Aggregate images
    clip_data = []
    for iter_idx in tqdm(range(iters)):
        rot_mat = rotations.compute_rotation_matrix_from_ortho6d(rot_vec)
        optim_verts = batch_verts.bmm(rot_mat) + trans.unsqueeze(1)
        if crop_box:
            rendres = batch_render(
                optim_verts,
                batch_faces,
                K=camintr_crop,
                image_sizes=[(crop_size[1], crop_size[0])],
                mode="silh",
                faces_per_pixel=faces_per_pixel,
            )
        else:
            rendres = batch_render(
                optim_verts,
                batch_faces,
                K=camintr,
                image_sizes=[(width, height)],
                mode="silh",
                faces_per_pixel=faces_per_pixel,
            )
        optim_masks = rendres[:, :, :, -1]
        mask_diff = ref_masks - optim_masks
        mask_l2 = (mask_diff ** 2).mean()
        mask_l1 = mask_diff.abs().mean()
        mask_iou = lyiou.batch_mask_iou(
            (optim_masks > 0), (ref_masks > 0)
        ).mean()
        metrics["l1"].append(mask_l1.item())
        metrics["l2"].append(mask_l2.item())
        metrics["mask"].append(mask_iou.item())

        optim_mask_diff = target_masks - optim_masks
        if "l2" in loss_type:
            loss = (optim_mask_diff ** 2).mean()
        elif "l1" in loss_type:
            loss = optim_mask_diff.abs().mean()
        elif "adapt" in loss_type:
            loss = adaptive_loss.lossfun(
                optim_mask_diff.view(rot_nb * batch_size, -1)
            ).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if iter_idx % viz_step == 0:
            row_idxs = np.linspace(
                0, batch_size * rot_nb - 1, viz_rows
            ).astype(np.int)
            row_nb = viz_rows
            fig, axes = plt.subplots(
                row_nb,
                col_nb,
                figsize=(int(col_nb * fig_res), int(row_nb * fig_res)),
            )
            for row_idx in range(row_nb):
                show_idx = row_idxs[row_idx]
                ax = vizmp.get_axis(
                    axes, row_idx, 0, row_nb=row_nb, col_nb=col_nb
                )
                ax.imshow(npt.numpify(optim_masks[show_idx]))
                ax.set_title("optim mask")
                ax = vizmp.get_axis(
                    axes, row_idx, 1, row_nb=row_nb, col_nb=col_nb
                )
                ax.imshow(npt.numpify(ref_masks[show_idx]))
                ax.set_title("ref mask")
                ax = vizmp.get_axis(
                    axes, row_idx, 2, row_nb=row_nb, col_nb=col_nb
                )
                ax.imshow(
                    npt.numpify(ref_masks[show_idx] - optim_masks[show_idx]),
                    vmin=-1,
                    vmax=1,
                )
                ax.set_title("ref masks diff")
                ax = vizmp.get_axis(
                    axes, row_idx, 3, row_nb=row_nb, col_nb=col_nb
                )
                ax.imshow(npt.numpify(target_masks[show_idx]), vmin=-1, vmax=1)
                ax.set_title("target mask")
                ax = vizmp.get_axis(
                    axes, row_idx, 4, row_nb=row_nb, col_nb=col_nb
                )
                ax.imshow(
                    npt.numpify(
                        target_masks[show_idx] - optim_masks[show_idx]
                    ),
                    vmin=-1,
                    vmax=1,
                )
                ax.set_title("masks diff")
            viz_folder = save_folder / "viz"
            viz_folder.mkdir(parents=True, exist_ok=True)
            data = vizmp.fig2np(fig)
            clip_data.append(data)
            fig.savefig(viz_folder / f"{iter_idx:04d}.png")

    clip = mpy.ImageSequenceClip(clip_data, fps=4)
    clip.write_videofile(str(viz_folder / "out.mp4"))
    clip.write_videofile(str(viz_folder / "out.webm"))
    results["metrics"] = metrics
    return results