Example #1
def extrusion_to_matrix(entity):
    """
    Converts an extrusion vector to a rotation matrix that denotes the transformation between world coordinate system
    and the entity's own coordinate system (described by the extrusion vector).
    """
    def arbitrary_x_axis(extrusion_normal):
        # DXF "Arbitrary Axis Algorithm": pick a world axis that is not nearly
        # parallel to the extrusion normal and derive the OCS x/y axes from it
        world_y = Vector((0, 1, 0))
        world_z = Vector((0, 0, 1))
        if abs(extrusion_normal[0]) < 1 / 64 and abs(extrusion_normal[1]) < 1 / 64:
            a_x = world_y.cross(extrusion_normal)
        else:
            a_x = world_z.cross(extrusion_normal)
        a_x.normalize()
        return a_x, extrusion_normal.cross(a_x)

    az = Vector(entity.extrusion)
    ax, ay = arbitrary_x_axis(az)
    ax4 = ax.to_4d()
    ay4 = ay.to_4d()
    az4 = az.to_4d()
    ax4[3] = 0
    ay4[3] = 0
    az4[3] = 0
    translation = Vector((0, 0, 0, 1))
    if hasattr(entity, "elevation"):
        if type(entity.elevation) is tuple:
            translation = Vector(entity.elevation).to_4d()
        else:
            translation = (az * entity.elevation).to_4d()
    return Matrix((ax4, ay4, az4, translation)).transposed()
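A minimal usage sketch (the DummyEntity class is hypothetical, standing in for a dxfgrabber-style entity; runs wherever Blender's mathutils is available):

from mathutils import Vector, Matrix

class DummyEntity:
    # hypothetical entity: mirrored OCS (e.g. inside a mirrored block) at elevation 2
    extrusion = (0.0, 0.0, -1.0)
    elevation = 2.0

m = extrusion_to_matrix(DummyEntity())
ocs_point = Vector((1.0, 0.0, 0.0)).to_4d()   # a point given in OCS coordinates
world_point = (m * ocs_point).xyz             # Blender <2.8 uses * for matrix math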
Example #2
def point_camera_to(cam, xyz_target, up=(0, 0, 1)):
    """Points camera to target.

    Args:
        cam (bpy_types.Object): Camera object.
        xyz_target (array_like): Target point in world coordinates.
        up (array_like, optional): World vector that, when projected,
            points up in the image plane.
    """
    logger_name = thisfile + '->point_camera_to()'

    up = Vector(up)
    xyz_target = Vector(xyz_target)

    direction = xyz_target - cam.location

    # Rotate camera with quaternion so that `track` aligns with `direction`, and
    # world +z, when projected, aligns with camera +y (i.e., points up in image plane)
    track = '-Z'
    rot_quat = direction.to_track_quat(track, 'Y')
    cam.rotation_euler = (0, 0, 0)
    cam.rotation_euler.rotate(rot_quat)

    # Further rotate camera so that world `up`, when projected, points up on image plane
    # We know right now world +z, when projected, points up, so we just need to rotate
    # the camera around the lookat direction by an angle
    cam_mat, _, _ = get_camera_matrix(cam)
    up_proj = cam_mat * up.to_4d()
    orig_proj = cam_mat * Vector((0, 0, 0)).to_4d()
    try:
        up_proj = Vector((up_proj[0] / up_proj[2], up_proj[1] / up_proj[2])) - \
            Vector((orig_proj[0] / orig_proj[2], orig_proj[1] / orig_proj[2]))
    except ZeroDivisionError:
        logger.name = logger_name
        logger.error(("w in homogeneous coordinates is 0; "
                      "camera coincides with the point to project? "
                      "So can't rotate camera to ensure up vector"))
        logger.info(
            "Camera '%s' pointed to %s, but with no guarantee on up vector",
            cam.name, tuple(xyz_target))
        return cam
    # +------->
    # |
    # |
    # v
    up_proj[1] = -up_proj[1]
    # ^
    # |
    # |
    # +------->
    a = Vector((0, 1)).angle_signed(up_proj)  # clockwise is positive
    cam.rotation_euler.rotate(Quaternion(direction, a))

    logger.name = logger_name
    logger.info("Camera '%s' pointed to %s with world %s pointing up",
                cam.name, tuple(xyz_target), tuple(up))

    return cam
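A hedged usage sketch for the function above (assumes a Blender <2.8 scene containing a camera object named 'Camera'; get_camera_matrix is the module's own helper and is not shown here):

import bpy

cam = bpy.data.objects['Camera']
point_camera_to(cam, (1.0, 2.0, 0.5))                 # world +Z ends up pointing up in the image
point_camera_to(cam, (1.0, 2.0, 0.5), up=(0, 1, 0))   # use world +Y as image-up instead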
Example #3
def point_camera_to(cam, xyz_target, up=(0, 0, 1)):
    """
    Point camera to target

    Args:
        cam: Camera object
            bpy_types.Object
        xyz_target: Target point in world coordinates
            Array_like of 3 floats
        up: World vector that, when projected, points up in the image plane
            Array_like of 3 floats
            Optional; defaults to (0, 0, 1)
    """
    logger.name = thisfile + '->point_camera_to()'

    up = Vector(up)
    xyz_target = Vector(xyz_target)

    direction = xyz_target - cam.location

    # Rotate camera with quaternion so that `track` aligns with `direction`, and
    # world +z, when projected, aligns with camera +y (i.e., points up in image plane)
    track = '-Z'
    rot_quat = direction.to_track_quat(track, 'Y')
    cam.rotation_euler = (0, 0, 0)  # reset first so rotations don't accumulate
    cam.rotation_euler.rotate(rot_quat)

    # Further rotate camera so that world `up`, when projected, points up on image plane
    # We know right now world +z, when projected, points up, so we just need to rotate
    # the camera around the lookat direction by an angle
    cam_mat, _, _ = get_camera_matrix(cam)
    up_proj = cam_mat * up.to_4d()
    orig_proj = cam_mat * Vector((0, 0, 0)).to_4d()
    up_proj = Vector((up_proj[0] / up_proj[2], up_proj[1] / up_proj[2])) - \
        Vector((orig_proj[0] / orig_proj[2], orig_proj[1] / orig_proj[2]))
    # +------->
    # |
    # |
    # v
    up_proj[1] = -up_proj[1]
    # ^
    # |
    # |
    # +------->
    a = Vector((0, 1)).angle_signed(up_proj)  # clockwise is positive
    cam.rotation_euler.rotate(Quaternion(direction, a))

    logger.info("Camera '%s' pointed to %s with world %s pointing up",
                cam.name, tuple(xyz_target), tuple(up))

    return cam
Example #4
def calculateMVBB(points, edges):
    if len(points) < 4:
        return handleDegenerateCases(points, edges)
    points = np.array(points)
    # optional: shift the points so the minimum x, y, z over the entire set
    # is 0 (currently disabled)
    #shift = points.min(axis=0)
    #points = points - shift

    min_volume = float("inf")
    n = len(edges)
    specs = None
    # try every pair of edges (ordering is not important)
    for i2 in range(n):
        e2 = edges[i2]
        u2 = points[e2[0]] - points[e2[1]]
        for i1 in range(i2 + 1, n):
            e1 = edges[i1]
            u1 = points[e1[0]] - points[e1[1]]
            if np.dot(u2, u1) != 0:
                # only exactly perpendicular edge pairs define a candidate box
                continue
            # transform the two edges into an orthogonal basis
            u = normcross(u2, u1)
            v = normcross(u, u2)
            w = normcross(u, v)

            # project all the points on to the basis u v w
            forward, backwards = basisChange(u, v, w)
            p = points @ forward

            volume, mins, maxes = calcVolume(p)

            # we are looking for the minimum volume box
            if volume <= min_volume:
                min_volume = volume
                specs = u, v, w, mins, maxes, backwards
    if specs is None:
        return handleDegenerateCases(points, edges)
    u, v, w, mins, maxes, backwards = specs

    # get the corner of the box from the projected extents (adding the shift
    # back would be needed here if the shift above were enabled)
    mins = mins.tolist()[0]
    maxes = maxes.tolist()[0]
    corner = u * mins[0] + v * mins[1] + w * mins[2]
    #corner += shift

    # create the sides which are vectors with the magnitude the length
    # of that side
    l1 = (maxes[0] - mins[0])
    l2 = (maxes[1] - mins[1])
    l3 = (maxes[2] - mins[2])
    #v1 = u * l1
    #v2 = v * l2
    #v3 = w * l3

    #original return corner, v1, v2, v3
    v4 = lambda x: Vector([*x, 0])
    vc = Vector((corner + (u * l1 + v * l2 + w * l3) / 2).tolist())
    rotTransMatrix = Matrix(list(zip(v4(u), v4(v), v4(w), vc.to_4d())))
    return rotTransMatrix, Vector([l1 / 2, l2 / 2, l3 / 2, 0])  #matrix
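The helpers normcross, basisChange and calcVolume are not included in the snippet. A minimal sketch of what they plausibly look like, inferred from the call sites (the .tolist()[0] indexing above suggests calcVolume returns numpy.matrix rows; handleDegenerateCases is left out, as its behavior cannot be inferred):

import numpy as np

def normcross(a, b):
    # normalized cross product of two 3-vectors
    c = np.cross(a, b)
    return c / np.linalg.norm(c)

def basisChange(u, v, w):
    # change-of-basis matrices for the orthonormal frame (u, v, w):
    # 'forward' maps world points into the frame; since the frame is
    # orthonormal, the inverse ('backwards') is just the transpose
    forward = np.matrix([u, v, w]).T
    return forward, forward.T

def calcVolume(p):
    # axis-aligned bounding box of the projected points, plus its volume
    mins = p.min(axis=0)
    maxes = p.max(axis=0)
    volume = float(np.prod(np.asarray(maxes - mins)))
    return volume, mins, maxes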
Example #5
def scan_advanced(scanner_object,
                  evd_file=None,
                  evd_last_scan=True,
                  timestamp=0.0,
                  world_transformation=Matrix()):

    # threshold for comparing projector and camera rays
    thresh = 0.01

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z

    x_multiplier = -1.0 if inv_scan_x else 1.0
    y_multiplier = -1.0 if inv_scan_y else 1.0
    z_multiplier = -1.0 if inv_scan_z else 1.0

    start_time = time.time()

    max_distance = scanner_object.kinect_max_dist
    min_distance = scanner_object.kinect_min_dist
    add_blender_mesh = scanner_object.add_scan_mesh
    add_noisy_blender_mesh = scanner_object.add_noise_scan_mesh
    noise_mu = scanner_object.kinect_noise_mu
    noise_sigma = scanner_object.kinect_noise_sigma
    noise_scale = scanner_object.kinect_noise_scale
    noise_smooth = scanner_object.kinect_noise_smooth
    res_x = scanner_object.kinect_xres
    res_y = scanner_object.kinect_yres
    flength = scanner_object.kinect_flength
    WINDOW_INLIER_DISTANCE = scanner_object.kinect_inlier_distance

    if res_x < 1 or res_y < 1:
        raise ValueError("Resolution must be > 0")

    pixel_width = max(0.0001, (math.tan(
        (parameters["horiz_fov"] / 2.0) * math.pi / 180.0) * flength) /
                      max(1.0, res_x / 2.0))  #default:0.0078
    pixel_height = max(0.0001, (math.tan(
        (parameters["vert_fov"] / 2.0) * math.pi / 180.0) * flength) /
                       max(1.0, res_y / 2.0))  #default:0.0078
    print("%f,%f" % (pixel_width, pixel_height))
    cx = float(res_x) / 2.0
    cy = float(res_y) / 2.0

    evd_buffer = []

    rays = [0.0] * res_y * res_x * 6
    # use a comprehension so each entry is an independent list; multiplying a
    # nested list would alias the same inner list across all rays
    ray_info = [[0.0, 0.0, 0.0] for _ in range(res_y * res_x)]

    baseline = Vector([0.075, 0.0,
                       0.0])  #Kinect has a baseline of 7.5 centimeters

    rayidx = 0
    ray = Vector([0.0, 0.0, 0.0])
    """Calculate the rays from the projector"""
    for y in range(res_y):
        for x in range(res_x):
            """Calculate a vector that originates at the principal point
               and points to the pixel in the sensor. This vector is then
               scaled to the maximum scanning distance 
            """

            physical_x = float(x - cx) * pixel_width
            physical_y = float(y - cy) * pixel_height
            physical_z = -float(flength)

            #ray = Vector([physical_x, physical_y, physical_z])
            ray.xyz = [physical_x, physical_y, physical_z]
            ray.normalize()
            final_ray = max_distance * ray
            rays[rayidx * 6] = final_ray[0]
            rays[rayidx * 6 + 1] = final_ray[1]
            rays[rayidx * 6 + 2] = final_ray[2]
            rays[rayidx * 6 + 3] = baseline.x
            rays[rayidx * 6 + 4] = baseline.y
            rays[rayidx * 6 + 5] = baseline.z
            """ pitch and yaw are added for completeness, normally they are
                not provided by a ToF Camera but can be derived 
                from the pixel position and the camera parameters.
            """
            yaw = math.atan(physical_x / flength)
            pitch = math.atan(physical_y / flength)
            ray_info[rayidx][0] = yaw
            ray_info[rayidx][1] = pitch
            ray_info[rayidx][2] = timestamp

            rayidx += 1
    """ Max distance is increased because the kinect is limited by 4m
        _normal distance_ to the imaging plane, We don't need shading in the
        first pass. 
        #TODO: the shading requirements might change when transmission
        is implemented (the rays might pass through glass)
    """
    returns = blensor.scan_interface.scan_rays(rays, 2.0 * max_distance, True,
                                               True, True, True)

    camera_rays = []
    # -1 is an "invalid" sentinel (checked with projector_idx >= 0 below), so
    # the dtype must be signed
    projector_ray_index = -1 * numpy.ones(len(returns), dtype=numpy.int64)

    kinect_image = numpy.zeros((res_x * res_y, 16))
    kinect_image[:, 3:11] = float('NaN')
    kinect_image[:, 11] = -1.0
    """Calculate the rays from the camera to the hit points of the projector rays"""
    for i in range(len(returns)):
        idx = returns[i][-1]
        kinect_image[idx, 12:15] = returns[i][5]

        if returns[i][0] < max_distance:
            camera_rays.extend([
                returns[i][1] + baseline.x, returns[i][2] + baseline.y,
                returns[i][3] + baseline.z
            ])
            projector_ray_index[i] = idx

    camera_returns = blensor.scan_interface.scan_rays(camera_rays,
                                                      2 * max_distance, False,
                                                      False, False)

    evd_storage = evd.evd_file(evd_file,
                               res_x,
                               res_y,
                               max_distance,
                               output_image=False,
                               output_noisy=True,
                               append_frame_counter=False)

    all_quantized_disparities = numpy.empty(res_x * res_y)
    all_quantized_disparities[:] = INVALID_DISPARITY

    disparity_weight = numpy.empty(res_x * res_y)
    disparity_weight[:] = INVALID_DISPARITY

    all_quantized_disp_mat = all_quantized_disparities.reshape(res_y, res_x)
    disp_weight_mat = disparity_weight.reshape(res_y, res_x)

    weights = numpy.array([
        1.0 / float((1.2 * x)**2 + (1.2 * y)**2) if x != 0 or y != 0 else 1.0
        for x in range(-4, 5) for y in range(-4, 5)
    ]).reshape((9, 9))
    """Build a quantized disparity map"""
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1]
        projector_idx = projector_ray_index[
            idx]  # Get the index of the original ray

        if (abs(camera_rays[idx * 3] - camera_returns[i][1]) < thresh and
                abs(camera_rays[idx * 3 + 1] - camera_returns[i][2]) < thresh
                and
                abs(camera_rays[idx * 3 + 2] - camera_returns[i][3]) < thresh
                and abs(camera_returns[i][3]) <= max_distance
                and abs(camera_returns[i][3]) >= min_distance):
            """The ray hit the projected ray, so this is a valid measurement"""
            projector_point = get_uv_from_idx(projector_idx, res_x, res_y)

            camera_x = get_pixel_from_world(
                camera_rays[idx * 3], camera_rays[idx * 3 + 2],
                flength / pixel_width) + random.gauss(noise_mu, noise_sigma)

            camera_y = get_pixel_from_world(camera_rays[idx * 3 + 1],
                                            camera_rays[idx * 3 + 2],
                                            flength / pixel_width)
            """ Kinect calculates the disparity with an accuracy of 1/8 pixel"""

            camera_x_quantized = round(camera_x * 8.0) / 8.0

            #I don't know if this accurately represents the kinect
            camera_y_quantized = round(camera_y * 8.0) / 8.0

            disparity_quantized = camera_x_quantized + projector_point[0]
            if projector_idx >= 0:
                all_quantized_disparities[projector_idx] = disparity_quantized

    processed_disparities = numpy.empty(res_x * res_y)
    fast_9x9_window(all_quantized_disparities, res_x, res_y,
                    processed_disparities, noise_smooth, noise_scale)
    """We reuse the vector objects to spare us the object creation every
       time
    """
    v = Vector([0.0, 0.0, 0.0])
    vn = Vector([0.0, 0.0, 0.0])
    """Check if the rays of the camera meet with the rays of the projector and
       add them as valid returns if they do"""
    image_idx = 0

    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1]
        projector_idx = projector_ray_index[
            idx]  # Get the index of the original ray
        camera_x, camera_y = get_uv_from_idx(projector_idx, res_x, res_y)

        if projector_idx >= 0:
            disparity_quantized = processed_disparities[projector_idx]
        else:
            disparity_quantized = INVALID_DISPARITY

        if disparity_quantized < INVALID_DISPARITY and disparity_quantized != 0.0:
            disparity_quantized = -disparity_quantized
            Z_quantized = (flength *
                           (baseline.x)) / (disparity_quantized * pixel_width)
            X_quantized = baseline.x + Z_quantized * camera_x * pixel_width / flength
            Y_quantized = baseline.y + Z_quantized * camera_y * pixel_width / flength
            Z_quantized = -(Z_quantized + baseline.z)

            v.xyz = [x_multiplier * (returns[idx][1] + baseline.x),
                     y_multiplier * (returns[idx][2] + baseline.y),
                     z_multiplier * (returns[idx][3] + baseline.z)]
            vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)

            vt = (world_transformation * v.to_4d()).xyz

            vn.xyz = [
                x_multiplier * X_quantized, y_multiplier * Y_quantized,
                z_multiplier * Z_quantized
            ]
            vector_length_noise = vn.magnitude

            #TODO@mgschwan: prevent object creation here too
            v_noise = (world_transformation * vn.to_4d()).xyz

            kinect_image[projector_idx] = [
                ray_info[projector_idx][2], 0.0, 0.0, -returns[idx][3],
                -Z_quantized, vt[0], vt[1], vt[2], v_noise[0], v_noise[1],
                v_noise[2], returns[idx][4], returns[idx][5][0],
                returns[idx][5][1], returns[idx][5][2], projector_idx
            ]
            image_idx += 1
        else:
            """Occlusion"""
            #FIXME: Dirty hack to signal we've got an occluded/invalid value
            kinect_image[projector_idx] = [
                0.0, 0.0, 0.0, 0, numpy.nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                returns[idx][4], returns[idx][5][0], returns[idx][5][1],
                returns[idx][5][2], projector_idx
            ]

    for e in kinect_image:
        evd_storage.addEntry(timestamp=e[0],
                             yaw=e[1],
                             pitch=e[2],
                             distance=e[3],
                             distance_noise=e[4],
                             x=e[5],
                             y=e[6],
                             z=e[7],
                             x_noise=e[8],
                             y_noise=e[9],
                             z_noise=e[10],
                             object_id=e[11],
                             color=[e[12], e[13], e[14]],
                             idx=e[15])

    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 5:8],
                                               "Scan",
                                               world_transformation,
                                               buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11],
                                               "NoisyScan",
                                               world_transformation,
                                               buffer=additional_data)

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, 0.0, scan_time
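The Z_quantized line above is the standard stereo relation Z = f·b / (d·pixel_width). A quick numeric sanity check (the focal length value here is hypothetical; the 0.075 m baseline and 0.0078 pixel width come from the snippet):

flength = 4.73        # hypothetical focal length, same units as pixel_width
baseline_x = 0.075    # Kinect baseline from the snippet, in meters
pixel_width = 0.0078  # snippet default

def depth_from_disparity(disparity_px):
    # Z = f * b / (d * pixel_width), mirroring the Z_quantized computation
    return (flength * baseline_x) / (disparity_px * pixel_width)

print(depth_from_disparity(11.375))   # ~4.0 m, i.e. near the Kinect's range limit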
Example #6
def scan_advanced(scanner_object, evd_file=None,
                  evd_last_scan=True,
                  timestamp=0.0,
                  world_transformation=Matrix()):


    # threshold for comparing projector and camera rays
    thresh = 0.01

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z 

    x_multiplier = -1.0 if inv_scan_x else 1.0
    y_multiplier = -1.0 if inv_scan_y else 1.0
    z_multiplier = -1.0 if inv_scan_z else 1.0

    start_time = time.time()

    max_distance = scanner_object.kinect_max_dist
    min_distance = scanner_object.kinect_min_dist
    add_blender_mesh = scanner_object.add_scan_mesh
    add_noisy_blender_mesh = scanner_object.add_noise_scan_mesh
    noise_mu = scanner_object.kinect_noise_mu
    noise_sigma = scanner_object.kinect_noise_sigma                
    noise_scale = scanner_object.kinect_noise_scale
    noise_smooth = scanner_object.kinect_noise_smooth                
    res_x = scanner_object.kinect_xres 
    res_y = scanner_object.kinect_yres
    flength = scanner_object.kinect_flength
    WINDOW_INLIER_DISTANCE = scanner_object.kinect_inlier_distance


    if res_x < 1 or res_y < 1:
        raise ValueError("Resolution must be > 0")

    pixel_width = 0.0078
    pixel_height = 0.0078

    cx = float(res_x) / 2.0
    cy = float(res_y) / 2.0

    evd_buffer = []

    rays = [0.0]*res_y*res_x*6
    # use a comprehension so each entry is an independent list; multiplying a
    # nested list would alias the same inner list across all rays
    ray_info = [[0.0, 0.0, 0.0] for _ in range(res_y*res_x)]

    baseline = Vector([0.075,0.0,0.0]) #Kinect has a baseline of 7.5 centimeters


    
    rayidx = 0
    ray = Vector([0.0, 0.0, 0.0])
    """Calculate the rays from the projector"""
    for y in range(res_y):
        for x in range(res_x):
            """Calculate a vector that originates at the principal point
               and points to the pixel in the sensor. This vector is then
               scaled to the maximum scanning distance 
            """ 

            physical_x = float(x-cx) * pixel_width
            physical_y = float(y-cy) * pixel_height
            physical_z = -float(flength)

            #ray = Vector([physical_x, physical_y, physical_z])
            ray.xyz=[physical_x, physical_y, physical_z]
            ray.normalize()
            final_ray = max_distance*ray
            rays[rayidx*6] = final_ray[0]
            rays[rayidx*6+1] = final_ray[1]
            rays[rayidx*6+2] = final_ray[2]
            rays[rayidx*6+3] = baseline.x
            rays[rayidx*6+4] = baseline.y
            rays[rayidx*6+5] = baseline.z

            """ pitch and yaw are added for completeness, normally they are
                not provided by a ToF Camera but can be derived 
                from the pixel position and the camera parameters.
            """
            yaw = math.atan(physical_x/flength)
            pitch = math.atan(physical_y/flength)
            ray_info[rayidx][0] = yaw
            ray_info[rayidx][1] = pitch
            ray_info[rayidx][2] = timestamp

            rayidx += 1

    """ Max distance is increased because the kinect is limited by 4m
        _normal distance_ to the imaging plane, We don't need shading in the
        first pass. 
        #TODO: the shading requirements might change when transmission
        is implemented (the rays might pass through glass)
    """
    returns = blensor.scan_interface.scan_rays(rays, 2.0*max_distance, True,True,True,True)

    camera_rays = []
    # -1 is an "invalid" sentinel, so the dtype must be signed
    projector_ray_index = -1 * numpy.ones(len(returns), dtype=numpy.int64)

    kinect_image = numpy.zeros((res_x*res_y,16))
    kinect_image[:,3:11] = float('NaN')
    kinect_image[:,11] = -1.0
    """Calculate the rays from the camera to the hit points of the projector rays"""
    for i in range(len(returns)):
        idx = returns[i][-1]
        kinect_image[idx,12:15] = returns[i][5]

        if returns[i][0] < max_distance:
            camera_rays.extend([returns[i][1]+baseline.x, returns[i][2]+baseline.y,
                                returns[i][3]+baseline.z])
            projector_ray_index[i] = idx


    camera_returns = blensor.scan_interface.scan_rays(camera_rays, 2*max_distance, False,False,False)
    
    verts = []
    verts_noise = []
    evd_storage = evd.evd_file(evd_file, res_x, res_y, max_distance)

    all_quantized_disparities = numpy.empty(res_x*res_y)
    all_quantized_disparities[:] = INVALID_DISPARITY
    
    disparity_weight = numpy.empty(res_x*res_y)
    disparity_weight[:] = INVALID_DISPARITY

    all_quantized_disp_mat = all_quantized_disparities.reshape(res_y,res_x)
    disp_weight_mat = disparity_weight.reshape(res_y,res_x)

    weights = numpy.array([1.0/float((1.2*x)**2 + (1.2*y)**2) if x != 0 or y != 0 else 1.0
                           for x in range(-4, 5) for y in range(-4, 5)]).reshape((9, 9))
    
    """Build a quantized disparity map"""
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1] 
        projector_idx = projector_ray_index[idx] # Get the index of the original ray

        if (abs(camera_rays[idx*3]-camera_returns[i][1]) < thresh and
            abs(camera_rays[idx*3+1]-camera_returns[i][2]) < thresh and
            abs(camera_rays[idx*3+2]-camera_returns[i][3]) < thresh and
            abs(camera_returns[i][3]) <= max_distance and
            abs(camera_returns[i][3]) >= min_distance):
            """The ray hit the projected ray, so this is a valid measurement"""
            projector_point = get_uv_from_idx(projector_idx, res_x,res_y)

            camera_x = get_pixel_from_world(camera_rays[idx*3],camera_rays[idx*3+2],
                                   flength/pixel_width) + random.gauss(noise_mu, noise_sigma)

            camera_y = get_pixel_from_world(camera_rays[idx*3+1],camera_rays[idx*3+2],
                                   flength/pixel_width)

            """ Kinect calculates the disparity with an accuracy of 1/8 pixel"""

            camera_x_quantized = round(camera_x*8.0)/8.0
            
            #I don't know if this accurately represents the kinect 
            camera_y_quantized = round(camera_y*8.0)/8.0 

            disparity_quantized = camera_x_quantized + projector_point[0]
            if projector_idx >= 0:
                all_quantized_disparities[projector_idx] = disparity_quantized
        
    processed_disparities = numpy.empty(res_x*res_y)
    fast_9x9_window(all_quantized_disparities, res_x, res_y, processed_disparities, noise_smooth, noise_scale)
    
    """We reuse the vector objects to spare us the object creation every
       time
    """
    v = Vector([0.0,0.0,0.0])
    vn = Vector([0.0,0.0,0.0])
    """Check if the rays of the camera meet with the rays of the projector and
       add them as valid returns if they do"""
    image_idx = 0
    
    
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1] 
        projector_idx = projector_ray_index[idx] # Get the index of the original ray
        camera_x,camera_y = get_uv_from_idx(projector_idx, res_x,res_y)

        if projector_idx >= 0:
            disparity_quantized = processed_disparities[projector_idx]
        else:
            disparity_quantized = INVALID_DISPARITY
        
        if disparity_quantized < INVALID_DISPARITY and disparity_quantized != 0.0:
            disparity_quantized = -disparity_quantized
            Z_quantized = (flength*(baseline.x))/(disparity_quantized*pixel_width)
            X_quantized = baseline.x+Z_quantized*camera_x*pixel_width/flength
            Y_quantized = baseline.y+Z_quantized*camera_y*pixel_width/flength
            Z_quantized = -(Z_quantized+baseline.z)
            
            v.xyz=[x_multiplier*(returns[idx][1]+baseline.x),\
                   y_multiplier*(returns[idx][2]+baseline.y),\
                   z_multiplier*(returns[idx][3]+baseline.z)]
            vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)

            vt = (world_transformation * v.to_4d()).xyz
            verts.append ( vt )

            vn.xyz = [x_multiplier*X_quantized,y_multiplier*Y_quantized,z_multiplier*Z_quantized]
            vector_length_noise = vn.magnitude
            
            #TODO@mgschwan: prevent object creation here too
            v_noise = (world_transformation * vn.to_4d()).xyz 
            verts_noise.append( v_noise )
             
            kinect_image[projector_idx] = [ray_info[projector_idx][2], 
               0.0, 0.0, -returns[idx][3], -Z_quantized, vt[0], 
               vt[1], vt[2], v_noise[0], v_noise[1], v_noise[2], 
               returns[idx][4], returns[idx][5][0], returns[idx][5][1],
               returns[idx][5][2],projector_idx]
            image_idx += 1
        else:
            """Occlusion"""
            pass

    for e in kinect_image:
        evd_storage.addEntry(timestamp=e[0], yaw=e[1], pitch=e[2],
                             distance=e[3], distance_noise=e[4], x=e[5], y=e[6], z=e[7],
                             x_noise=e[8], y_noise=e[9], z_noise=e[10], object_id=e[11],
                             color=[e[12], e[13], e[14]], idx=e[15])
        

    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation)            

    bpy.context.scene.update()  
    
    end_time = time.time()
    scan_time = end_time-start_time
    print ("Elapsed time: %.3f"%(scan_time))

    return True, 0.0, scan_time
Example #7
 def distance(self, other: Vector):
     """Return the signed distance between the plane and a Vector."""
     return self.dot(other.to_4d())
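Note how this works together with the constructor below: the plane is stored as a 4D vector (nx, ny, nz, d) with d = -n·loc, and other.to_4d() appends w = 1, so the dot product evaluates to n·p + d, the signed point-plane distance (positive on the side the normal points to).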
Example #8
 def __new__(cls, location=Vector(), normal=ZAXIS, rotation=Quaternion()):
     loc = Vector(location)
     nor = Vector(normal).normalized()
     # store the plane in Hesse normal form (nx, ny, nz, d) with
     # d = -n.dot(loc), so dotting with a 4D point yields the signed distance
     vector = nor.to_4d()
     vector[3] = -nor.dot(loc)
     return Vector.__new__(cls, vector)
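A minimal combined sketch of the Plane class the two snippets above come from (the ZAXIS constant is assumed to be the world +Z vector; runs inside Blender, where mathutils allows subclassing Vector):

from mathutils import Vector, Quaternion

ZAXIS = Vector((0.0, 0.0, 1.0))  # assumed module-level constant

class Plane(Vector):
    def __new__(cls, location=Vector(), normal=ZAXIS, rotation=Quaternion()):
        loc = Vector(location)
        nor = Vector(normal).normalized()
        vector = nor.to_4d()
        vector[3] = -nor.dot(loc)
        return Vector.__new__(cls, vector)

    def distance(self, other: Vector):
        """Return the signed distance between the plane and a Vector."""
        return self.dot(other.to_4d())

plane = Plane(location=(0, 0, 2), normal=(0, 0, 1))
print(plane.distance(Vector((0, 0, 5))))   # 3.0: the point sits 3 units above the plane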