Example #1
def quat_math(q0, q1, inv0=False, inv1=False):
    """ Performs addition and subtraction between quaternions
    :param q0: a vector of length 4, quaternion rotation (x,y,z,w)
    :param q1: a vector of length 4, quaternion rotation (x,y,z,w)
    :param inv0: if True, inverts q0
    :param inv1: if True, inverts q1

    Examples:
    to get the total rotation from going to q0 then q1: quat_math(q0,q1,False,False)
    to get the rotation from q1 to q0: quat_math(q0,q1,True,False)
    """
    if not isinstance(q0, np.ndarray):
        q0 = np.array(q0)
    if not isinstance(q1, np.ndarray):
        q1 = np.array(q1)
    q0 = to_transquat(q0)
    q0 = trans.unit_vector(q0)
    q1 = to_transquat(q1)
    q1 = trans.unit_vector(q1)
    if inv0:
        q0 = trans.quaternion_conjugate(q0)
    if inv1:
        q1 = trans.quaternion_conjugate(q1)
    res = trans.quaternion_multiply(q0, q1)
    return to_pyquat(res)
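A minimal usage sketch with hypothetical inputs (it assumes the to_transquat/to_pyquat helpers above convert between this (x, y, z, w) layout and the library's internal quaternion order):

import numpy as np

qz90 = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])  # 90 deg about z
total = quat_math(qz90, qz90, False, False)  # compose: 180 deg about z
ident = quat_math(qz90, qz90, True, False)   # difference of equal rotations: identity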
Example #2
    def __init__(self, length, axis_normal, x, y, z):
        self.length = length
        self.axes = []
        for vector in [x, y, z]:
            self.axes.append(append(tf.unit_vector(array(vector)), [1]))
        self.axis_normal = append(tf.unit_vector(array(axis_normal)), [1])

        self.backup = [self.axis_normal, self.axes]
Example #3
    def user_pan(self, dx, dy):
        """ Pans the camera based on passed mouse movements.

        user_pan(int, int) -> None
        """
        sensitivity = self._settings["pan_sensetivity"]
        focus_to_cam = self._position - self._focus
        x = [self._position, self._focus]
        for i in range(len(x)):
            x[i] = x[i] + (-sensitivity*dy * tf.unit_vector(self._up))
            x[i] = x[i] + (sensitivity*dx * tf.unit_vector(
                cross(focus_to_cam, self._up)))
        self._position, self._focus = x
Example #4
 def tops(self, tops):
     if tops is None:
         v1, v2, v3 = list(self.verteces)
         n1 = v2.coord - v1.coord
         n2 = v3.coord - v1.coord
         vec = np.cross(n1, n2)
         d = 84 * 0.3
         top1 = Top(self.coord + d * unit_vector(vec))
         top2 = Top(self.coord - d * unit_vector(vec))
         top1.trimer = self
         top2.trimer = self
         tops = set([top1, top2])
     else:
         assert (len(tops) == 2)
     self.__tops = tops
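The tops sit along the triangle's normal, which is the cross product of two edge vectors; a quick standalone check of that normal computation (assuming unit_vector comes from Gohlke's transformations module):

import numpy as np
from transformations import unit_vector  # assumption: Gohlke's module

v1, v2, v3 = np.zeros(3), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
vec = np.cross(v2 - v1, v3 - v1)
print(unit_vector(vec))  # ~[0. 0. 1.] for this triangle in the xy-plane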
Example #5
def qv_mult(q1, v1):
    v1 = transformations.unit_vector(v1)
    q2 = list(v1)
    q2.append(0.0)
    return transformations.quaternion_multiply(
        transformations.quaternion_multiply(q1, q2),
        transformations.quaternion_conjugate(q1))[:3]
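qv_mult rotates a vector by a quaternion via q * v * q^-1. A short sketch, assuming transformations here is ROS's tf.transformations, whose (x, y, z, w) quaternion layout is what appending the scalar part last implies:

import numpy as np
from tf import transformations  # assumption: ROS tf.transformations

q = transformations.quaternion_about_axis(np.pi / 2, (0.0, 0.0, 1.0))  # 90 deg about z
print(qv_mult(q, (1.0, 0.0, 0.0)))  # ~[0. 1. 0.]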
Example #6
    def user_orbit(self, dx, dy):
        """ Orbits the camera based on passed mouse movements.

        user_orbit(int, int) -> None

        Note: h denotes horizontal and v denotes vertical
        """
        sensitivity = self._settings["orbit_sensetivity"]
        focus_to_cam = self._position - self._focus

        dist_to_focus_h = norm(focus_to_cam[:2])
        dist_to_focus = norm(focus_to_cam)

        angleh = atan2(focus_to_cam[1], focus_to_cam[0])
        anglev = atan2(focus_to_cam[2], dist_to_focus_h)

        angleh -= sensitivity*dx
        anglev -= sensitivity*dy

        if anglev >= pi/2-0.1:
            anglev = pi/2-0.1
        if anglev <= -pi/2+0.1:
            anglev = -pi/2+0.1

        focus_to_cam = array(
            [cos(angleh)*cos(anglev)*dist_to_focus,
            sin(angleh)*cos(anglev)*dist_to_focus,
            sin(anglev)*dist_to_focus]
        )

        self._up = tf.unit_vector(-1 * cross(focus_to_cam,
                            cross(focus_to_cam, array([0, 0, 1]))))
        self._position = self._focus + focus_to_cam
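The closing double cross product uses the identity -v x (v x z) = z|v|^2 - v(v.z): it keeps the component of world-up z that is orthogonal to the view direction, so the camera never rolls. A quick numeric check:

import numpy as np

v = np.array([1.0, 2.0, 0.5])  # focus-to-cam direction
up = -np.cross(v, np.cross(v, np.array([0.0, 0.0, 1.0])))
print(np.dot(up, v))  # ~0.0: up stays orthogonal to the view direction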
Example #7
 def strafe(self, ds):
     strafe_vec = vector_product([0, 1, 0], self.ref)
     strafe_vec = unit_vector(strafe_vec)
     x = self.pos[0] + strafe_vec[0] * ds
     y = self.pos[1] + strafe_vec[1] * ds
     z = self.pos[2] + strafe_vec[2] * ds
     self.pos = [x, y, z]
Example #8
 def projectVectorsImageSBA(self, IK, image):
     vec_list = []
     body2ned = image.get_body2ned_sba()
     cam2body = image.get_cam2body()
     for uv in image.uv_list:
         uvh = np.array([uv[0], uv[1], 1.0])
         proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
         proj_norm = transformations.unit_vector(proj)
         vec_list.append(proj_norm)
     return vec_list
Example #9
    def user_zoom(self, scroll_y):
        """ Zooms the camera based on passed mouse movements.

        user_zoom(int) -> None
        """
        sensitivity = self._settings["zoom_sensetivity"]
        u_focus_to_cam = tf.unit_vector(self._position - self._focus)
        self._position = self._position + \
                         scroll_y*sensitivity * u_focus_to_cam
        if norm(self._position - self._focus) <= 1:
            self._focus = self._position + -1 * u_focus_to_cam
        return None
Example #10
    def projectVectors(self, IK, body2ned, cam2body, uv_list):
        proj_list = []
        for uv in uv_list:
            uvh = np.array([uv[0], uv[1], 1.0])
            proj = body2ned.dot(cam2body).dot(IK).dot(uvh)
            proj_norm = transformations.unit_vector(proj)
            proj_list.append(proj_norm)

        return proj_list
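A hypothetical sketch of what feeds projectVectors: IK is the inverse of a pinhole intrinsic matrix K, so each homogeneous pixel becomes a unit ray (identity attitude matrices used here for simplicity):

import numpy as np
from transformations import unit_vector  # assumption: Gohlke's module

K = np.array([[1000.0, 0.0, 640.0],
              [0.0, 1000.0, 360.0],
              [0.0, 0.0, 1.0]])
IK = np.linalg.inv(K)
uvh = np.array([640.0, 360.0, 1.0])  # the principal point
ray = unit_vector(np.eye(3).dot(np.eye(3)).dot(IK).dot(uvh))
print(ray)  # ~[0. 0. 1.]: the optical axis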
Example #11
    def __init__(self, T):

        if type(T) == str:
            Tfile = T
            #Load transformation
            Tfis = open(Tfile, 'r')
            lines = []
            lines = Tfis.readlines()
            self.scale = float(lines[0])
            self.Ss = tf.scale_matrix(self.scale)
            quat_line = lines[1].split(" ")
            self.quat = tf.unit_vector(np.array([float(quat_line[3]),
                                            float(quat_line[0]),
                                            float(quat_line[1]),
                                            float(quat_line[2])]))
            self.Hs = tf.quaternion_matrix(self.quat)
            trans_line = lines[2].split(" ")
            self.Ts = np.array([float(trans_line[0]),
                           float(trans_line[1]),
                           float(trans_line[2])])
            Tfis.close()
            self.Rs = self.Hs.copy()[:3, :3]
            self.Hs[:3, 3] = self.Ts[:3]

            self.Hs = self.Ss.dot(self.Hs)  # to add again

        elif (type(T) == np.ndarray):
            self.Hs = T
            scale, shear, angles, trans, persp = tf.decompose_matrix(T)
            self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2])
            self.Rs = tf.quaternion_matrix(self.quat)
            self.scale = scale[0]
            self.Ts = trans / self.scale


        print "Loaded Ground Truth Transformation: "
        print self.Hs
Example #12
    def add(self,
            particle,
            template,
            adding_edge,
            debug=False):  # returns a candidate new vertex coord
        #     V4,V5,V6,V7
        #
        #          V1-------V3
        #         /  \     /
        #       /     \  /
        #      V5------V2
        #
        # E1 is midpoint of V1,V2 (the adding edge)
        # V3 is the vertex opposite the adding edge
        # V5 is the new vertex we need to calculate
        #
        #V4 will be an intermediate point along the axis E1-V3 which is the appropriate stem length for the template
        #    (essentially making the E1V3 distance longer or shorter)
        #
        # The convention I used is upper case V is the Vertex object. Lower case v is the actual coordinate (a numpy vector)

        assert (self.full == False)

        E1 = adding_edge
        (V1, V2) = adding_edge.verteces
        V3 = self.opposite_vertex(adding_edge)

        # # maybe extend length a bit along the V1V3 vector to get the appropriate stem length for the template

        E1V3 = E1.coord - V3.coord
        V1V3_dist = np.linalg.norm(V3.coord - E1.coord)
        d = template.stem_length / V1V3_dist

        v4_1 = E1.coord - d * E1V3
        v4_2 = E1.coord + d * E1V3
        dist1 = np.linalg.norm(v4_1 - V3.coord)
        dist2 = np.linalg.norm(v4_2 - V3.coord)

        if dist1 < dist2:  # pick the value closer to V3 to become V4
            v4 = v4_1
        else:
            v4 = v4_2

        # set origin of rotation to E1
        e1 = E1.coord
        v4 = v4 - e1

        # get two new points with consistent angle for first rotation
        axis = V1.coord - V2.coord
        axis = unit_vector(axis)
        a1 = template.angle_radians
        a2 = -1 * (template.angle_radians)
        v5_1 = self.get_new_point(e1, v4, a1, axis)
        v5_2 = self.get_new_point(e1, v4, a2, axis)

        if debug is True:  # add debug verteces for the chosen verteces
            dbgv1 = Vertex(v5_1)
            particle.debug_verteces.add(dbgv1)
            dbgv2 = Vertex(v5_2)
            particle.debug_verteces.add(dbgv2)
            dbgv3 = Vertex(E1.coord)
            particle.debug_verteces.add(dbgv3)

        # pick closest to centroid
        if np.linalg.norm(v5_1 - particle.centroid) < np.linalg.norm(
                v5_2 - particle.centroid):
            new_point = v5_1

        else:
            new_point = v5_2

        new_vertex = Vertex(new_point)  # the final V5

        return new_vertex
Example #13
    def add2(self, particle, template,
             adding_edge):  # returns a candidate new vertex coord
        angle1 = template.angle1
        angle2 = template.angle2
        stem_length = template.stem_length

        assert (self.full == False)

        #     V4,V5,V6,V7
        #
        #          V1-------V3
        #         /  \     /
        #       /     \  /
        #      V7------V2
        #
        # E1 is midpoint of V1,V2 (the adding edge)
        # V4 is the first point on the way to the final point (V7). V4 is 1 stem_length above E1, 90 degrees from the plane (V1,V2,V3)
        # V5 will be an intermediate point after the first rotation
        # A 90 degree rotation of V5 each way will give V6, a test point to see which direction to rotate
        # A final rotation of V5 will yield V7

        E1 = adding_edge
        (V1, V2) = adding_edge.verteces
        V3 = self.opposite_vertex(adding_edge)

        # debug
        V1.atom_name = "Be"
        V2.atom_name = "Ba"
        V3.atom_name = "Ca"
        adding_edge.atom_name = "Na"

        vec_V2V3 = V2.coord - V3.coord
        vec_V2V1 = V2.coord - V1.coord
        vec_normal = np.cross(vec_V2V1, vec_V2V3)

        V4_1 = E1.coord + (stem_length * unit_vector(vec_normal))
        V4_2 = E1.coord - (stem_length * unit_vector(vec_normal))

        dbgv = Vertex(V4_1)
        dbgv.atom_name = "S"
        particle.debug_verteces.add(dbgv)
        dbgv = Vertex(V4_2)
        dbgv.atom_name = "O"
        particle.debug_verteces.add(dbgv)

        dist1 = np.linalg.norm(V4_1 - particle.centroid)
        dist2 = np.linalg.norm(V4_2 - particle.centroid)
        if (dist1 >= dist2):  # take the value further from centroid
            V4 = V4_1
            print("chose V4_1")
        else:
            V4 = V4_2
            print("chose V4_2")

        # set origin to E1
        e1 = E1.coord
        v1 = V1.coord - e1
        v2 = V2.coord - e1
        v3 = V3.coord - e1
        v4 = V4 - e1

        #########
        def get_new_point(
            reference_point, start_point, angle, axis
        ):  # where angle is radians of the stem length (edge to opposite vertex)

            M = rotation_matrix(angle, axis)
            new_point = np.dot(start_point, M[:3, :3].T)
            return new_point + reference_point

        # get two new points with consistent angle for first rotation
        axis = E1.coord - V3.coord

        axis = unit_vector(axis)

        V5_1 = get_new_point(e1, v4, angle1, axis)
        V5_2 = get_new_point(e1, v4, (2 * np.pi) - angle1, axis)

        dbgv = Vertex(V5_1)
        dbgv.atom_name = "Mg"
        particle.debug_verteces.add(dbgv)
        dbgv = Vertex(V5_2)
        dbgv.atom_name = "Al"
        particle.debug_verteces.add(dbgv)

        #perform a test rotation of V5 to get V6 and determine where to rotate towards for V7
        axis = V2.coord - V1.coord
        V5 = V5_1  # choosing V5_1
        v5 = V5 - e1  # at some point will need to choose which V5 to use. (random choice?)

        a1 = (0.5) * np.pi
        a2 = (-0.5) * np.pi

        V6_1 = get_new_point(e1, v5, a1, axis)
        V6_2 = get_new_point(e1, v5, a2, axis)

        dbgv = Vertex(V6_1)
        dbgv.atom_name = "Cl"
        particle.debug_verteces.add(dbgv)
        dbgv = Vertex(V6_2)
        dbgv.atom_name = "Na"
        particle.debug_verteces.add(dbgv)

        dist1 = np.linalg.norm(V6_1 - V3.coord)
        dist2 = np.linalg.norm(V6_2 - V3.coord)
        print(dist1, dist2)
        if dist1 > dist2:
            V6 = V6_1
            print("chose V6_1")
            sign = 1
        else:
            V6 = V6_2
            print("chose V6_2")
            sign = -1

        # make final rotation of V5 to V7
        a1 = (0.5 + (sign * angle2)) * np.pi
        a2 = (1 + (sign * angle2)) * np.pi

        V7_1 = get_new_point(e1, v5, a1, axis)
        V7_2 = get_new_point(e1, v5, a2, axis)

        dist1 = np.linalg.norm(V6 - V7_1)
        dist2 = np.linalg.norm(V6 - V7_2)

        if dist1 < dist2:
            V7 = V7_1
        else:
            V7 = V7_2

        dbgv = Vertex(V7_1)
        dbgv.atom_name = "Mn"
        particle.debug_verteces.add(dbgv)
        dbgv = Vertex(V7_2)
        dbgv.atom_name = "Li"
        particle.debug_verteces.add(dbgv)

        new_vertex = Vertex(V7)

        return new_vertex  # the candidate V7 vertex
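The inner get_new_point helper does all the geometric work in these add methods: rotate a point (expressed relative to a reference) about a unit axis, then translate back; note np.dot(p, M[:3, :3].T) equals M[:3, :3] @ p. A standalone sketch:

import numpy as np
from transformations import rotation_matrix, unit_vector  # assumption: Gohlke's module

def get_new_point(reference_point, start_point, angle, axis):
    M = rotation_matrix(angle, unit_vector(axis))
    return np.dot(start_point, M[:3, :3].T) + reference_point

p = get_new_point(np.zeros(3), np.array([1.0, 0.0, 0.0]), np.pi / 2, [0.0, 0.0, 1.0])
print(p)  # ~[0. 1. 0.]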
Example #14
    def add(self, particle, template,
            adding_edge):  # returns a candidate new vertex coord
        #print("Adding trimer template type:",template.template_type)
        angle = template.angle
        stem_length = template.stem_length

        assert (self.full == False)

        E1 = adding_edge
        (V1, V2) = adding_edge.verteces
        V3 = self.opposite_vertex(adding_edge)

        # # maybe extend length a bit along the V1V3 vector to get the appropriate stem length for the template

        vec = E1.coord - V3.coord

        V1V3_dist = np.linalg.norm(V3.coord - E1.coord)

        d = stem_length / V1V3_dist  # this does not extend

        extended_point1 = E1.coord - d * vec
        extended_point2 = E1.coord + d * vec
        dist1 = np.linalg.norm(extended_point1 - V3.coord)
        dist2 = np.linalg.norm(extended_point2 - V3.coord)
        if dist1 < dist2:
            extended_point = extended_point1
        else:
            extended_point = extended_point2

        # set origin to E1
        e1 = E1.coord
        v1 = V1.coord - e1
        v2 = V2.coord - e1
        v3 = V3.coord - e1
        extended_point = extended_point - e1

        #########
        def get_new_point(
            reference_point, start_point, angle, axis
        ):  # where angle is radians of the stem length (edge to opposite vertex)

            M = rotation_matrix(angle, axis)
            new_point = np.dot(start_point, M[:3, :3].T)
            return new_point + reference_point

        # get two new points with consistent angle for first rotation
        axis = V1.coord - V2.coord

        axis = unit_vector(axis)

        p1 = get_new_point(e1, extended_point, angle, axis)
        p2 = get_new_point(e1, extended_point, -1 * angle, axis)

        # pick closest to centroid
        if np.linalg.norm(p1 - particle.centroid) < np.linalg.norm(
                p2 - particle.centroid):
            new_point = p1

        else:
            new_point = p2

        new_vertex = Vertex(new_point)

        return new_vertex  # the candidate new vertex
Example #15
    def check_clash_tops(self, adding_edge, candidate_vertex):
        adding_trimer = next(iter(adding_edge.trimers))

        # make tops
        (v1, v2) = adding_edge.verteces
        v3 = candidate_vertex
        n1 = v2.coord - v1.coord
        n2 = v3.coord - v1.coord
        vec = np.cross(n1, n2)
        d = 25
        center = (adding_edge.coord + candidate_vertex.coord) / 2
        top1 = Top(center + d * unit_vector(vec))
        top2 = Top(center - d * unit_vector(vec))
        new_tops = {top1, top2}

        # get a set of trimers to check for clashes with (not closely connected to the current trimer)
        check_trimers = self.exclude_nearby_trimers(adding_trimer)

        # check for clashes
        top_clash_flag = False
        rejection = None
        for trimer in check_trimers:
            if top_clash_flag is False:

                (v1, v2, v3) = trimer.verteces
                (t1, t2) = trimer.tops

                toltt = self.clash_tolerance_top_top
                toltv = self.clash_tolerance_top_vertex
                # a list of booleans; if any are True, there was a clash
                results = [
                    self.check_clash_between_two_objects(top1, v1, toltv),
                    self.check_clash_between_two_objects(top1, v2, toltv),
                    self.check_clash_between_two_objects(top1, v3, toltv),
                    self.check_clash_between_two_objects(top1, t1, toltt),
                    self.check_clash_between_two_objects(top1, t2, toltt),
                    self.check_clash_between_two_objects(top2, v1, toltv),
                    self.check_clash_between_two_objects(top2, v2, toltv),
                    self.check_clash_between_two_objects(top2, v3, toltv),
                    self.check_clash_between_two_objects(top2, t1, toltt),
                    self.check_clash_between_two_objects(top2, t2, toltt),
                ]
                if any(results):
                    top_clash_flag = True

        if top_clash_flag is True:
            rejection = Rejection(self.particle.timestep, 3)

        return top_clash_flag, rejection
Example #16
def compute_error(trial, descriptor_type, niter, percentile=99):

    src_scene_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan/trial_" + str(trial)
    src_features_dir = "/Users/isa/Experiments/reg3d_eval/downtown_dan/trial_" + str(trial) + "/" + descriptor_type + "_30"

    #read "geo-transformatiom"
    #************GEO**************"
    Tfile = "/data/lidar_providence/downtown_offset-1-financial-Hs.txt"
    Tfis = open(Tfile, 'r')
    lines=[];
    lines = Tfis.readlines();
    scale_geo = float(lines[0])
    Ss_geo = tf.scale_matrix(scale_geo);
    quat_line = lines[1].split(" ");
    quat_geo = np.array([float(quat_line[3]), float(quat_line[0]), float(quat_line[1]), float(quat_line[2])])
    Rs_geo = tf.quaternion_matrix(quat_geo);
    trans_line = lines[2].split(" ");
    trans_geo = np.array([float(trans_line[0]), float(trans_line[1]), float(trans_line[2])]);
    Tfis.close();
    Hs_geo = Rs_geo.copy();
    Hs_geo[:3, 3] = trans_geo[:3]
    Hs_geo = Ss_geo.dot(Hs_geo)
  
    LOG.debug( "\n************Geo************** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s", Ss_geo, Rs_geo, trans_geo, Hs_geo)
    
      
    #************Hs**************#
    #read source to target "Ground Truth" Transformation
    Tfile = src_scene_root + "/Hs.txt"
    Tfis = open(Tfile, 'r')
    lines = Tfis.readlines()
    scale = float(lines[0])
    Ss = tf.scale_matrix(scale)
    quat_line = lines[1].split(" ")
    quat = tf.unit_vector(np.array([float(quat_line[3]), float(quat_line[0]), float(quat_line[1]), float(quat_line[2])]))
    Hs = tf.quaternion_matrix(quat)
    trans_line = lines[2].split(" ")
    Ts = np.array([float(trans_line[0]), float(trans_line[1]), float(trans_line[2])])
    Tfis.close()
    Rs = Hs.copy()[:3, :3]
    Hs[:3, 3] = Ts[:3]
    Hs = Ss.dot(Hs)

    LOG.debug("\n************Hs************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs, Ts, Hs)
  
    #************Hs IA**************
    #read source to target "Initial Alignment" Transformation
    Tfile = src_features_dir + "/ia_transformation_" + str(percentile) + "_" + str(niter) + ".txt"
    Tfis = open(Tfile, 'r')
    Hs_ia = np.genfromtxt(Tfis, skip_header=1, usecols=(0, 1, 2, 3))
    Tfis.close()
    Tfis = open(Tfile, 'r')
    Ss_ia = np.genfromtxt(Tfis, skip_footer=4, usecols=(0,))
    Tfis.close()
    Rs_ia = Hs_ia[:3, :3] * (1.0 / Ss_ia)
    Ts_ia = Hs_ia[:3, 3] * (1.0 / Ss_ia)

    LOG.debug("\n************Hs IA************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_ia, Ts_ia, Hs_ia)
    
    #Initial Alignment errors
    #Rotation error - half angle between the normalized quaternions
    quat_ia = tf.unit_vector(tf.quaternion_from_matrix(Rs_ia))
    Rs_error_ia_norm = math.acos(abs(np.dot(quat_ia, quat)))

    #Translation error
    # x = Rs_ia*x_ia + Ts_ia = Rs_ia*(x_ia + np.dot(Rs_ia.T, Ts_ia))
    # np.dot(Rs_ia.T, Ts_ia) corresponds to the translation in the ia coordinate system
    Ts_error_ia = (Rs_ia.T).dot(Ts_ia) - (Rs.T).dot(Ts)
    Ts_error_ia_norm = scale_geo * scale * LA.norm(Ts_error_ia)

    LOG.debug("Error (R,T) %s , %s ", Rs_error_ia_norm, Ts_error_ia_norm)
        
        
    #************Hs ICP**************
    #read source to target "ICP" Transformation
    Tfile = src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + ".txt"
    Tfis = open(Tfile, 'r')
    Hs_icp = np.genfromtxt(Tfis, usecols=(0, 1, 2, 3))
    Tfis.close()

    Hs_icp = Hs_icp.dot(Hs_ia)
    Rs_icp = Hs_icp[:3, :3] * (1.0 / Ss_ia)
    Ts_icp = Hs_icp[:3, 3] * (1.0 / Ss_ia)

    LOG.debug("\n************Hs ICP************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_icp, Ts_icp, Hs_icp)

    #ICP errors
    #Rotation error - half angle between the normalized quaternions
    quat_icp = tf.unit_vector(tf.quaternion_from_matrix(Rs_icp))
    Rs_error_icp_norm = math.acos(abs(np.dot(quat_icp, quat)))

    #Translation error (same derivation as for the initial alignment)
    Ts_error_icp = (Rs_icp.T).dot(Ts_icp) - (Rs.T).dot(Ts)
    Ts_error_icp_norm = scale_geo * scale * LA.norm(Ts_error_icp)

    LOG.debug("Error (R,T) %s , %s ", Rs_error_icp_norm, Ts_error_icp_norm)
    
    #************Hs ICP Normals**************
    #read source to target "ICP with normals" Transformation
    Tfile = src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + "_n.txt"
    Tfis = open(Tfile, 'r')
    Hs_icn = np.genfromtxt(Tfis, usecols=(0, 1, 2, 3))
    Tfis.close()

    Hs_icn = Hs_icn.dot(Hs_ia)
    Rs_icn = Hs_icn[:3, :3] * (1.0 / Ss_ia)
    Ts_icn = Hs_icn[:3, 3] * (1.0 / Ss_ia)

    LOG.debug("\n************Hs ICP Normals************** \n R:\n%s \nT:\n%s \nH:\n%s", Rs_icn, Ts_icn, Hs_icn)

    #ICP-with-normals errors
    #Rotation error - half angle between the normalized quaternions
    quat_icn = tf.unit_vector(tf.quaternion_from_matrix(Rs_icn))
    Rs_error_icn_norm = math.acos(abs(np.dot(quat_icn, quat)))

    #Translation error (same derivation as for the initial alignment)
    Ts_error_icn = (Rs_icn.T).dot(Ts_icn) - (Rs.T).dot(Ts)
    Ts_error_icn_norm = scale_geo * scale * LA.norm(Ts_error_icn)

    LOG.debug("Error (R,T) %s , %s ", Rs_error_icn_norm, Ts_error_icn_norm)
    
    ICP_error = np.array([Rs_error_icp_norm, Ts_error_icp_norm])
    ICN_error = np.array([Rs_error_icn_norm, Ts_error_icn_norm])

    return ICP_error, ICN_error
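The rotation-error metric above is the angular distance between unit quaternions: acos|q_a . q_b| is half the relative rotation angle, and the abs() handles the q/-q double cover. A small standalone check against Gohlke's transformations module:

import math
import numpy as np
from transformations import unit_vector, quaternion_about_axis

q_a = unit_vector(quaternion_about_axis(0.0, (0, 0, 1)))        # identity
q_b = unit_vector(quaternion_about_axis(np.pi / 2, (0, 0, 1)))  # 90 deg about z
print(math.acos(abs(np.dot(q_a, q_b))))  # ~pi/4, half the 90 deg rotation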
Example #17
    def __init__(self, T):

        if type(T) == str:
            print("Loading Transformation from file: " + T)

            Tfile = T
            #Load transformation
            Tfis = open(Tfile, 'r')
            lines = []
            lines = Tfis.readlines()
            format = len(lines)
            Tfis.seek(0)  # reset file pointer

            if format not in (3, 4, 5):
                raise ValueError("Wrong number of lines in file")


            if format == 3:
                """Handles processing a ground truth transfomation
                File saved using vxl format
                x' = s *(Rx + T)
                scale
                quaternion
                translation """
                print("Reading format 3")
                self.scale = float(lines[0])
                self.Ss = tf.scale_matrix(self.scale)
                quat_line = lines[1].split(" ")
                self.quat = tf.unit_vector(np.array([float(quat_line[3]),
                                                float(quat_line[0]),
                                                float(quat_line[1]),
                                                float(quat_line[2])]))
                self.Hs = tf.quaternion_matrix(self.quat)
                trans_line = lines[2].split(" ")
                self.Ts = np.array([float(trans_line[0]),
                               float(trans_line[1]),
                               float(trans_line[2])])
                Tfis.close()
                self.Rs = self.Hs.copy()[:3, :3]
                self.Hs[:3, 3] = self.Ts[:3]

                self.Hs = self.Ss.dot(self.Hs)  # to add again

            if format == 4:
                """If the transformation was saved as:
                H (4x4) - = [S*R|S*T]
                """
                print("Reading format 4")

                self.Hs = np.genfromtxt(Tfile, usecols={0, 1, 2, 3})
                Tfis.close()
                scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs)
                self.scale = scale[0]  # assuming isotropic scaling

                self.Rs = self.Hs[:3, :3] * (1.0 / self.scale)
                self.Ts = self.Hs[:3, 3] * (1.0 / self.scale)
                self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2])

            if format == 5:
                """If the transformation was saved as:
                scale
                H (4x4) - = [S*R|S*T]
                """
                print("Reading format 5")
                self.Hs = np.genfromtxt(Tfis, skip_header=1, usecols={0, 1, 2, 3})
                Tfis.close()
                Tfis = open(Tfile, 'r')
                self.scale = np.genfromtxt(Tfis, skip_footer=4, usecols={0})
                Tfis.close()
                self.Rs = self.Hs[:3, :3] * (1.0 / self.scale)
                self.Ts = self.Hs[:3, 3] * (1.0 / self.scale)


                scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs)
                self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2])

                print "Debugging translation:"
                print self.Ts
                print trans/self.scale




        elif type(T) == np.ndarray:
            print("Loading Transformation array")
            self.Hs = T
            scale, shear, angles, trans, persp = tf.decompose_matrix(T)

            self.scale = scale[0]
            self.Rs = self.Hs[:3, :3] * (1.0 / self.scale)
            self.Ts = self.Hs[:3, 3] * (1.0 / self.scale)
            self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2])

            print "Debugging translation:"
            print self.Ts
            print trans/self.scale

            # self.Rs = tf.quaternion_matrix(self.quat)
            # self.Ts = trans / self.scale


        print(self.Hs)
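A round-trip sketch of the format-4 path: build H = [S*R | S*T] and recover its parts with decompose_matrix (assuming Gohlke's transformations module imported as tf, as above):

import numpy as np
import transformations as tf  # assumption: Gohlke's module

s = 2.0
H = tf.scale_matrix(s).dot(tf.rotation_matrix(np.pi / 3, (0, 0, 1)))
H[:3, 3] = s * np.array([1.0, 2.0, 3.0])  # S*T in the last column
scale, shear, angles, trans, persp = tf.decompose_matrix(H)
print(scale[0])          # ~2.0, the isotropic scale
print(trans / scale[0])  # ~[1. 2. 3.], the unscaled translation Ts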
Example #18
            result_validity.append(False)

    for i in range(len(lst)):
        # Identify class id and compose dictionary key
        class_id = int(lst[i]) - 1
        key = class_names[class_id] + '_' + video_id

        result_string = invalid_result_placeholder

        if result_validity[i]:
            # Convert quaternion to axis angle
            q = my_result[i][0:4]
            p = my_result[i][4:7]
            np_q = np.quaternion(q[0], q[1], q[2], q[3])
            axis_angle = quaternion.as_rotation_vector(np_q)
            axis_normalized = unit_vector(axis_angle)

            # Compose output vector as "<x y z> <axis> <angle> <6 null velocity vector> <frame id>"
            result = [
                p[0], p[1], p[2], axis_normalized[0], axis_normalized[1],
                axis_normalized[2],
                np.linalg.norm(axis_angle)
            ]
            result_string = '{:s}'.format(' '.join(
                ['{:.7f}'.format(x)
                 for x in result[:7]])) + ' 0.0 0.0 0.0 0.0 0.0 0.0'

        # Write on file
        try:
            results_files[key]
        except KeyError:
Example #19
def transformation_error(**kwargs):

  root_dir          = kwargs.get('root_dir')
  descriptor_type   = kwargs.get('descriptor_type')
  radius            = kwargs.get('radius', 30)
  percentile        = kwargs.get('percentile', 99)
  nr_iterations     = kwargs.get('nr_iterations', 500)
  aux_output_string = kwargs.get('aux_output_string', "")
  descriptor_string = kwargs.get('descriptor_string', "descriptors")
  gt_fname          = kwargs.get('gt_fname', "Hs.txt")
  geo_tfile         = kwargs.get('geo_tfile', "")
  compute_scale     = kwargs.get('compute_scale', False)

  src_features_dir = root_dir + "/" + descriptor_type + "_" + str(radius)

  #************GEO**************"
  #load the geo tranformation
  GEO = reg3d_T.geo_transformation(geo_tfile)

  #************Hs**************#
  #read source to target "Ground Truth" Transformation
  Tfile = root_dir + "/" + gt_fname
  GT_Tform = reg3d_T.gt_transformation(Tfile)

  #************PCL IA and ICP**************
  #read source to target "Initial Alignment" and "ICP" Transformations
  Tfile_ia = src_features_dir + "/ia_transformation_" + str(percentile) + "_" + str(nr_iterations) + "_" + aux_output_string + ".txt"
  Tfile_icp = src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(nr_iterations) + "_" + aux_output_string + ".txt"

  REG_Tform = reg3d_T.pcl_transformation(Tfile_ia, Tfile_icp, (not compute_scale))


  #Initial Alignment errors
  #Rotation error - half angle between the normalized quaternions
  quat_ia = tf.unit_vector(tf.quaternion_from_matrix(REG_Tform.Rs_ia))
  Rs_error_ia_norm = math.acos(abs(np.dot(quat_ia, GT_Tform.quat))) * 180 / np.pi

  #Translation error
  # x = REG_Tform.Rs_ia*x_ia + Ts_ia = REG_Tform.Rs_ia*(x_ia + np.dot(REG_Tform.Rs_ia.T, Ts_ia))
  # np.dot(REG_Tform.Rs_ia.T, Ts_ia) corresponds to the translation in the ia coordinate system
  Ts_error_ia = (REG_Tform.Rs_ia.T).dot(REG_Tform.Ts_ia) - (GT_Tform.Rs.T).dot(GT_Tform.Ts)
  Ts_error_ia_norm = GEO.scale_geo * GT_Tform.scale * LA.norm(Ts_error_ia)

  scale_error_ia = 1.0 - REG_Tform.scale_ia / GT_Tform.scale
  print("Error (S,R,T)", scale_error_ia, Rs_error_ia_norm, Ts_error_ia_norm)

  #ICP errors
  #Rotation error - half angle between the normalized quaternions
  quat_icp = tf.unit_vector(tf.quaternion_from_matrix(REG_Tform.Rs_icp))
  Rs_error_icp_norm = math.acos(abs(np.dot(quat_icp, GT_Tform.quat))) * 180 / np.pi

  #Translation error (same derivation as for the initial alignment)
  Ts_error_icp = (REG_Tform.Rs_icp.T).dot(REG_Tform.Ts_icp) - (GT_Tform.Rs.T).dot(GT_Tform.Ts)
  Ts_error_icp_norm = GEO.scale_geo * GT_Tform.scale * LA.norm(Ts_error_icp)
  scale_error_icp = 1.0 - (REG_Tform.scale_icp * REG_Tform.scale_ia) / GT_Tform.scale
  print("Error (S,R,T)", scale_error_icp, Rs_error_icp_norm, Ts_error_icp_norm)

  IA_error = np.array([scale_error_ia, Rs_error_ia_norm, Ts_error_ia_norm])
  ICP_error = np.array([scale_error_icp, Rs_error_icp_norm, Ts_error_icp_norm])

  return IA_error, ICP_error
Example #20
def train(palette):

    with tf.Graph().as_default():
        with tf.device('/cpu:0'):
            ###################################################
            ### placeholders  # triplet input
            data_train_placeholder = tf.placeholder(tf.float32,
                                                    shape=(BATCH_SIZE,
                                                           NUM_POINT,
                                                           data_dim))
            data_positive_train_placeholder = tf.placeholder(tf.float32,
                                                             shape=(BATCH_SIZE,
                                                                    NUM_POINT,
                                                                    data_dim))
            data_negative_train_placeholder = tf.placeholder(tf.float32,
                                                             shape=(BATCH_SIZE,
                                                                    NUM_POINT,
                                                                    data_dim))

            is_training_placeholder = tf.placeholder(tf.bool, shape=())

            cluster_mean_placeholder = tf.placeholder(tf.float32,
                                                      shape=(cluster_num, 128))
            indices_placeholder = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))

            batch = tf.get_variable('batch', [],
                                    initializer=tf.constant_initializer(0),
                                    trainable=False)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            ###################################################
            ### get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                       momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)

            ###################################################
            tower_grads = []
            feature_anchor_gpu = []
            net_anchor_gpu = []
            net_positive_gpu = []
            net_negative_gpu = []
            cluster_loss_gpu = []
            contrastive_loss_gpu = []
            total_loss_gpu = []
            l2_loss_gpu = []
            all_loss_gpu = []
            ### get model and loss
            for i in range(NUM_GPUS):
                with tf.variable_scope(tf.get_variable_scope(),
                                       reuse=bool(i != 0)):
                    with tf.device('/gpu:%d' % (i)), tf.name_scope(
                            'gpu_%d' % (i)) as scope:
                        pc_patch = tf.slice(data_train_placeholder,
                                            [i * DEVICE_BATCH_SIZE, 0, 0],
                                            [DEVICE_BATCH_SIZE, -1, -1])
                        pc_patch_positive = tf.slice(
                            data_positive_train_placeholder,
                            [i * DEVICE_BATCH_SIZE, 0, 0],
                            [DEVICE_BATCH_SIZE, -1, -1])
                        pc_patch_negative = tf.slice(
                            data_negative_train_placeholder,
                            [i * DEVICE_BATCH_SIZE, 0, 0],
                            [DEVICE_BATCH_SIZE, -1, -1])

                        pc_features, pc_net, _ = MODEL.get_model(
                            pc_patch, is_training_placeholder, bn_decay)
                        tf.get_variable_scope().reuse_variables()
                        pc_features_positive, pc_net_positive, _ = MODEL.get_model(
                            pc_patch_positive, is_training_placeholder,
                            bn_decay)
                        pc_features_negative, pc_net_negative, _ = MODEL.get_model(
                            pc_patch_negative, is_training_placeholder,
                            bn_decay)

                        pc_indices = tf.slice(indices_placeholder,
                                              [i * DEVICE_BATCH_SIZE],
                                              [DEVICE_BATCH_SIZE])
                        pc_cluster_loss, _, _, pc_contrastive_loss, pc_total_loss = MODEL.get_loss(
                            pc_net, pc_net_positive, pc_net_negative,
                            cluster_mean_placeholder, pc_indices)
                        update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                        update_ops = tf.group(*update_op)
                        with tf.control_dependencies([update_ops]):
                            pc_l2_loss = tf.losses.get_regularization_loss()
                            pc_l2_loss = 0.00001 * pc_l2_loss

                        pc_all_loss = pc_total_loss + pc_l2_loss
                        grads = optimizer.compute_gradients(pc_all_loss)
                        tower_grads.append(grads)

                        cluster_loss_gpu.append(pc_cluster_loss)
                        contrastive_loss_gpu.append(pc_contrastive_loss)
                        total_loss_gpu.append(pc_total_loss)
                        l2_loss_gpu.append(pc_l2_loss)
                        all_loss_gpu.append(pc_all_loss)

                        feature_anchor_gpu.append(pc_features)

                        net_anchor_gpu.append(pc_net)
                        net_positive_gpu.append(pc_net_positive)
                        net_negative_gpu.append(pc_net_negative)

            cluster_loss = tf.reduce_mean(cluster_loss_gpu)
            contrastive_loss = tf.reduce_mean(contrastive_loss_gpu)
            total_loss = tf.reduce_mean(total_loss_gpu)
            l2_loss = tf.reduce_mean(l2_loss_gpu)
            all_loss = tf.reduce_mean(all_loss_gpu)

            tf.summary.scalar('cluster_loss', cluster_loss)
            tf.summary.scalar('contrastive_loss', contrastive_loss)
            tf.summary.scalar('l2_loss', l2_loss)
            tf.summary.scalar('total_loss', all_loss)

            feature_anchor = tf.concat(feature_anchor_gpu, 0)
            net_anchor = tf.concat(net_anchor_gpu, 0)
            net_positive = tf.concat(net_positive_gpu, 0)
            net_negative = tf.concat(net_negative_gpu, 0)

            grads = average_gradients(tower_grads)
            apply_gradient_op = optimizer.apply_gradients(grads,
                                                          global_step=batch)

            variable_averages = tf.train.ExponentialMovingAverage(
                bn_decay, batch)
            variables_averages_op = variable_averages.apply(
                tf.trainable_variables())
            train_op = tf.group(apply_gradient_op, variables_averages_op)

        saver = tf.train.Saver(max_to_keep=11)

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_placeholder: True})

        restore_epoch, checkpoint_path = model_utils.pre_load_checkpoint(
            LOG_DIR)
        global LOG_FOUT
        if restore_epoch == 0:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
            LOG_FOUT.write(str(socket.gethostname()) + '\n')
            LOG_FOUT.write(str(FLAGS) + '\n')
        else:
            LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')
            saver.restore(sess, checkpoint_path)

        ops = {
            'pointclouds_pl': data_train_placeholder,
            'pointclouds_pl_positive': data_positive_train_placeholder,
            'pointclouds_pl_negative': data_negative_train_placeholder,
            'is_training_pl': is_training_placeholder,
            'buffer': cluster_mean_placeholder,
            'indics': indices_placeholder,
            'feature_anchor': feature_anchor,
            'net_anchor': net_anchor,
            'net_positive': net_positive,
            'net_negative': net_negative,
            'cluster_loss': cluster_loss,
            'contrastive_loss': contrastive_loss,
            'total_loss': total_loss,
            'l2_loss': l2_loss,
            'all_loss': all_loss,
            'train_op': train_op,
            'merged': merged,
            'step': batch,
        }

        feature_buffer = np.random.rand(file_size, 128)
        feature_buffer = trafo.unit_vector(feature_buffer, axis=1)  # normalize
        for epoch in tqdm(range(restore_epoch, MAX_EPOCH)):
            print('******* EPOCH %03d *******' % (epoch))
            sys.stdout.flush()

            ## clustering
            # cluster the memory buffer
            cluster_pred = SpectralClustering(
                n_clusters=cluster_num, gamma=1).fit_predict(feature_buffer)
            # cluster_pred = KMeans(n_clusters=cluster_num).fit_predict(feature_buffer)
            # cluster_pred = AgglomerativeClustering(n_clusters=cluster_num).fit_predict(feature_buffer)
            cluster_mean = np.zeros(shape=(cluster_num, 128))
            for cluster_idx in range(cluster_num):
                indices = np.where(cluster_pred == cluster_idx)[0]
                cluster_avg = np.mean(feature_buffer[indices, :], axis=0)
                cluster_mean[cluster_idx, :] = cluster_avg
            cluster_mean = trafo.unit_vector(cluster_mean, axis=1)

            train_one_epoch(sess, cluster_mean, cluster_pred, data, ops,
                            train_writer)

            ## get the feature buffer after each epoch
            index_pool = range(pts_num)
            seed = random.sample(index_pool, NUM_POINT)
            data_2048 = data[:, seed, :]
            num_batches = file_size // BATCH_SIZE
            feature_buffer = np.zeros(shape=(file_size, 128))
            for batch_idx in range(num_batches + 1):
                if batch_idx != num_batches:
                    start_idx = batch_idx * BATCH_SIZE
                    end_idx = (batch_idx + 1) * BATCH_SIZE
                else:
                    start_idx = file_size - BATCH_SIZE
                    end_idx = file_size
                data_input = data_2048[start_idx:end_idx, :, :]
                feed_dict = {
                    ops['pointclouds_pl']: data_input,
                    ops['is_training_pl']: False,
                }
                features = sess.run([ops['net_anchor']], feed_dict=feed_dict)
                feature_buffer[start_idx:end_idx, :] = features[0]

            if epoch % 50 == 0:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, "model.ckpt"),
                                       global_step=epoch)
                print("Model saved in file: %s" % save_path)