Example #1
def scan_advanced(scanner_object, max_distance = 10.0, evd_file=None, add_blender_mesh = False, 
                  add_noisy_blender_mesh = False, tof_res_x = 176, tof_res_y = 144, 
                  lens_angle_w=43.6, lens_angle_h=34.6, flength = 10.0,  evd_last_scan=True, 
                  noise_mu=0.0, noise_sigma=0.004, timestamp = 0.0, backfolding=False,
                  world_transformation=Matrix()):

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z  

    start_time = time.time()


    #10.0mm is currently the distance between the focal point and the sensor
    sensor_width = 2 * math.tan(deg2rad(lens_angle_w/2.0)) * 10.0
    sensor_height = 2 * math.tan(deg2rad(lens_angle_h/2.0)) * 10.0

    if tof_res_x == 0 or tof_res_y == 0:
        raise ValueError("Resolution must be > 0")

    pixel_width = sensor_width / float(tof_res_x)
    pixel_height = sensor_height / float(tof_res_y)



    width = bpy.context.scene.render.resolution_x
    height = bpy.context.scene.render.resolution_y
    cx = float(tof_res_x) /2.0
    cy = float(tof_res_y) /2.0 




    evd_buffer = []
    rays = []
    ray_info = []

    ray = Vector([0.0,0.0,0.0])
    for x in range(tof_res_x):
        for y in range(tof_res_y):
            """Calculate a vector that originates at the principal point
               and points to the pixel in the sensor. This vector is then
               scaled to the maximum scanning distance 
            """ 
            physical_x = float(x-cx) * pixel_width
            physical_y = float(y-cy) * pixel_height
            physical_z = -float(flength)
            ray.xyz = [physical_x, physical_y, physical_z]
            ray.normalize()
            final_ray = max_distance*ray
            rays.extend([final_ray[0],final_ray[1],final_ray[2]])


            """ pitch and yaw are added for completeness, normally they are
                not provided by a ToF Camera but can be derived 
                from the pixel position and the camera parameters.
            """
            yaw = math.atan(physical_x/flength)
            pitch = math.atan(physical_y/flength)

            ray_info.append([yaw, pitch, timestamp])
            

    returns = blensor.scan_interface.scan_rays(rays, max_distance, inv_scan_x = inv_scan_x, inv_scan_y = inv_scan_y, inv_scan_z = inv_scan_z)

    verts = []
    verts_noise = []
    evd_storage = evd.evd_file(evd_file, tof_res_x, tof_res_y, max_distance)

    reusable_vector = Vector([0.0,0.0,0.0,0.0])
    for i in range(len(returns)):
        idx = returns[i][-1]
        distance_noise =  random.gauss(noise_mu, noise_sigma)
        #If everything works, substitute the previous line with this
        #distance_noise =  pixel_noise[returns[idx][-1]] + random.gauss(noise_mu, noise_sigma) 

        reusable_vector.xyzw = [returns[i][1],returns[i][2],returns[i][3],1.0]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1],returns[i][2],returns[i][3]]
        verts.append ( vt )
        vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)
        norm_vector = [v[0]/vector_length, v[1]/vector_length, v[2]/vector_length]


        vector_length_noise = vector_length+distance_noise
        if backfolding:
           #Distances in the range max_distance/2..max_distance are mapped to 0..max_distance/2
           if vector_length_noise >= max_distance/2.0:
               vector_length_noise = vector_length_noise - max_distance/2.0

        reusable_vector.xyzw = [norm_vector[0]*vector_length_noise, norm_vector[1]*vector_length_noise, norm_vector[2]*vector_length_noise,1.0]
        v_noise = (world_transformation * reusable_vector).xyz
        verts_noise.append( v_noise )

        evd_storage.addEntry(timestamp = ray_info[idx][2], yaw =(ray_info[idx][0]+math.pi)%(2*math.pi), pitch=ray_info[idx][1], distance=vector_length, distance_noise=vector_length_noise, x=vt[0], y=vt[1], z=vt[2], x_noise=v_noise[0], y_noise=v_noise[1], z_noise=v_noise[2], object_id=returns[i][4], color=returns[i][5], idx=returns[i][-1])

    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation) 

    bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time-start_time
    print ("Elapsed time: %.3f"%(scan_time))


    return True, 0.0, scan_time
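
A minimal usage sketch for the ToF variant above. The module path blensor.tof, the object name "Camera" and the output path are assumptions for illustration; the keyword arguments mirror the signature shown in Example #1.

import bpy
import blensor.tof  # assumed module path for the ToF scanner

scanner = bpy.data.objects["Camera"]  # assumed scanner object in the scene
ok, _, duration = blensor.tof.scan_advanced(
    scanner,
    max_distance=10.0,
    evd_file="/tmp/tof_scan.evd",  # hypothetical output file
    add_blender_mesh=True,
    add_noisy_blender_mesh=True,
    tof_res_x=176, tof_res_y=144,
    noise_mu=0.0, noise_sigma=0.004)
print("ToF scan finished in %.3f s" % duration)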
Example #2
def scan_advanced(scanner_object,
                  evd_file=None,
                  evd_last_scan=True,
                  timestamp=0.0,
                  world_transformation=Matrix()):

    # threshold for comparing projector and camera rays
    thresh = 0.01

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z

    x_multiplier = -1.0 if inv_scan_x else 1.0
    y_multiplier = -1.0 if inv_scan_y else 1.0
    z_multiplier = -1.0 if inv_scan_z else 1.0

    start_time = time.time()

    max_distance = scanner_object.kinect_max_dist
    min_distance = scanner_object.kinect_min_dist
    add_blender_mesh = scanner_object.add_scan_mesh
    add_noisy_blender_mesh = scanner_object.add_noise_scan_mesh
    noise_mu = scanner_object.kinect_noise_mu
    noise_sigma = scanner_object.kinect_noise_sigma
    noise_scale = scanner_object.kinect_noise_scale
    noise_smooth = scanner_object.kinect_noise_smooth
    res_x = scanner_object.kinect_xres
    res_y = scanner_object.kinect_yres
    flength = scanner_object.kinect_flength
    WINDOW_INLIER_DISTANCE = scanner_object.kinect_inlier_distance

    if res_x < 1 or res_y < 1:
        raise ValueError("Resolution must be > 0")

    pixel_width = max(0.0001, (math.tan(
        (parameters["horiz_fov"] / 2.0) * math.pi / 180.0) * flength) /
                      max(1.0, res_x / 2.0))  #default:0.0078
    pixel_height = max(0.0001, (math.tan(
        (parameters["vert_fov"] / 2.0) * math.pi / 180.0) * flength) /
                       max(1.0, res_y / 2.0))  #default:0.0078
    print("%f,%f" % (pixel_width, pixel_height))
    cx = float(res_x) / 2.0
    cy = float(res_y) / 2.0

    evd_buffer = []

    rays = [0.0] * res_y * res_x * 6
    ray_info = [[0.0, 0.0, 0.0]] * res_y * res_x

    baseline = Vector([0.075, 0.0,
                       0.0])  #Kinect has a baseline of 7.5 centimeters

    rayidx = 0
    ray = Vector([0.0, 0.0, 0.0])
    """Calculate the rays from the projector"""
    for y in range(res_y):
        for x in range(res_x):
            """Calculate a vector that originates at the principal point
               and points to the pixel in the sensor. This vector is then
               scaled to the maximum scanning distance 
            """

            physical_x = float(x - cx) * pixel_width
            physical_y = float(y - cy) * pixel_height
            physical_z = -float(flength)

            #ray = Vector([physical_x, physical_y, physical_z])
            ray.xyz = [physical_x, physical_y, physical_z]
            ray.normalize()
            final_ray = max_distance * ray
            rays[rayidx * 6] = final_ray[0]
            rays[rayidx * 6 + 1] = final_ray[1]
            rays[rayidx * 6 + 2] = final_ray[2]
            rays[rayidx * 6 + 3] = baseline.x
            rays[rayidx * 6 + 4] = baseline.y
            rays[rayidx * 6 + 5] = baseline.z
            """ pitch and yaw are added for completeness, normally they are
                not provided by a ToF Camera but can be derived 
                from the pixel position and the camera parameters.
            """
            yaw = math.atan(physical_x / flength)
            pitch = math.atan(physical_y / flength)
            ray_info[rayidx][0] = yaw
            ray_info[rayidx][1] = pitch
            ray_info[rayidx][2] = timestamp

            rayidx += 1
    """ Max distance is increased because the kinect is limited by 4m
        _normal distance_ to the imaging plane, We don't need shading in the
        first pass. 
        #TODO: the shading requirements might change when transmission
        is implemented (the rays might pass through glass)
    """
    returns = blensor.scan_interface.scan_rays(rays, 2.0 * max_distance, True,
                                               True, True, True)

    camera_rays = []
    projector_ray_index = -1 * numpy.ones(len(returns), dtype=numpy.uint32)

    kinect_image = numpy.zeros((res_x * res_y, 16))
    kinect_image[:, 3:11] = float('NaN')
    kinect_image[:, 11] = -1.0
    """Calculate the rays from the camera to the hit points of the projector rays"""
    for i in range(len(returns)):
        idx = returns[i][-1]
        kinect_image[idx, 12:15] = returns[i][5]

        if returns[i][0] < max_distance:
            camera_rays.extend([
                returns[i][1] + baseline.x, returns[i][2] + baseline.y,
                returns[i][3] + baseline.z
            ])
            projector_ray_index[i] = idx

    camera_returns = blensor.scan_interface.scan_rays(camera_rays,
                                                      2 * max_distance, False,
                                                      False, False)

    evd_storage = evd.evd_file(evd_file,
                               res_x,
                               res_y,
                               max_distance,
                               output_image=False,
                               output_noisy=True,
                               append_frame_counter=False)

    all_quantized_disparities = numpy.empty(res_x * res_y)
    all_quantized_disparities[:] = INVALID_DISPARITY

    disparity_weight = numpy.empty(res_x * res_y)
    disparity_weight[:] = INVALID_DISPARITY

    all_quantized_disp_mat = all_quantized_disparities.reshape(res_y, res_x)
    disp_weight_mat = disparity_weight.reshape(res_y, res_x)

    weights = numpy.array([
        1.0 / float((1.2 * x)**2 + (1.2 * y)**2) if x != 0 or y != 0 else 1.0
        for x in range(-4, 5) for y in range(-4, 5)
    ]).reshape((9, 9))
    """Build a quantized disparity map"""
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1]
        projector_idx = projector_ray_index[
            idx]  # Get the index of the original ray

        if (abs(camera_rays[idx * 3] - camera_returns[i][1]) < thresh and
                abs(camera_rays[idx * 3 + 1] - camera_returns[i][2]) < thresh
                and
                abs(camera_rays[idx * 3 + 2] - camera_returns[i][3]) < thresh
                and abs(camera_returns[i][3]) <= max_distance
                and abs(camera_returns[i][3]) >= min_distance):
            """The ray hit the projected ray, so this is a valid measurement"""
            projector_point = get_uv_from_idx(projector_idx, res_x, res_y)

            camera_x = get_pixel_from_world(
                camera_rays[idx * 3], camera_rays[idx * 3 + 2],
                flength / pixel_width) + random.gauss(noise_mu, noise_sigma)

            camera_y = get_pixel_from_world(camera_rays[idx * 3 + 1],
                                            camera_rays[idx * 3 + 2],
                                            flength / pixel_width)
            """ Kinect calculates the disparity with an accuracy of 1/8 pixel"""

            camera_x_quantized = round(camera_x * 8.0) / 8.0

            #I don't know if this accurately represents the kinect
            camera_y_quantized = round(camera_y * 8.0) / 8.0

            disparity_quantized = camera_x_quantized + projector_point[0]
            if projector_idx >= 0:
                all_quantized_disparities[projector_idx] = disparity_quantized

    processed_disparities = numpy.empty(res_x * res_y)
    fast_9x9_window(all_quantized_disparities, res_x, res_y,
                    processed_disparities, noise_smooth, noise_scale)
    """We reuse the vector objects to spare us the object creation every
       time
    """
    v = Vector([0.0, 0.0, 0.0])
    vn = Vector([0.0, 0.0, 0.0])
    """Check if the rays of the camera meet with the rays of the projector and
       add them as valid returns if they do"""
    image_idx = 0

    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1]
        projector_idx = projector_ray_index[
            idx]  # Get the index of the original ray
        camera_x, camera_y = get_uv_from_idx(projector_idx, res_x, res_y)

        if projector_idx >= 0:
            disparity_quantized = processed_disparities[projector_idx]
        else:
            disparity_quantized = INVALID_DISPARITY

        if disparity_quantized < INVALID_DISPARITY and disparity_quantized != 0.0:
            disparity_quantized = -disparity_quantized
            Z_quantized = (flength *
                           (baseline.x)) / (disparity_quantized * pixel_width)
            X_quantized = baseline.x + Z_quantized * camera_x * pixel_width / flength
            Y_quantized = baseline.y + Z_quantized * camera_y * pixel_width / flength
            Z_quantized = -(Z_quantized + baseline.z)

            v.xyz=[x_multiplier*(returns[idx][1]+baseline.x),\
                   y_multiplier*(returns[idx][2]+baseline.y),\
                   z_multiplier*(returns[idx][3]+baseline.z)]
            vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)

            vt = (world_transformation * v.to_4d()).xyz

            vn.xyz = [
                x_multiplier * X_quantized, y_multiplier * Y_quantized,
                z_multiplier * Z_quantized
            ]
            vector_length_noise = vn.magnitude

            #TODO@mgschwan: prevent object creation here too
            v_noise = (world_transformation * vn.to_4d()).xyz

            kinect_image[projector_idx] = [
                ray_info[projector_idx][2], 0.0, 0.0, -returns[idx][3],
                -Z_quantized, vt[0], vt[1], vt[2], v_noise[0], v_noise[1],
                v_noise[2], returns[idx][4], returns[idx][5][0],
                returns[idx][5][1], returns[idx][5][2], projector_idx
            ]
            image_idx += 1
        else:
            """Occlusion"""
            #FIXME: Dirty hack to signal we've got an occluded/invalid value
            kinect_image[projector_idx] = [
                0.0, 0.0, 0.0, 0, numpy.nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                returns[idx][4], returns[idx][5][0], returns[idx][5][1],
                returns[idx][5][2], projector_idx
            ]
            pass

    for e in kinect_image:
        evd_storage.addEntry(timestamp=e[0],
                             yaw=e[1],
                             pitch=e[2],
                             distance=e[3],
                             distance_noise=e[4],
                             x=e[5],
                             y=e[6],
                             z=e[7],
                             x_noise=e[8],
                             y_noise=e[9],
                             z_noise=e[10],
                             object_id=e[11],
                             color=[e[12], e[13], e[14]],
                             idx=e[15])

    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 5:8],
                                               "Scan",
                                               world_transformation,
                                               buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11],
                                               "NoisyScan",
                                               world_transformation,
                                               buffer=additional_data)

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, 0.0, scan_time
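
For reference, the stereo triangulation used above, Z_quantized = (flength * baseline.x) / (disparity_quantized * pixel_width), pulled out as a small standalone helper. The default focal length, baseline and pixel pitch below are illustrative assumptions, not values read from the scanner object.

def disparity_to_depth(disparity_px, flength=4.73, baseline_x=0.075, pixel_width=0.0078):
    """Depth along the optical axis from a disparity given in pixels, mirroring
    Z_quantized = (flength * baseline.x) / (disparity_quantized * pixel_width).
    flength and pixel_width are assumed to be in millimetres, baseline_x in metres."""
    return (flength * baseline_x) / (disparity_px * pixel_width)

print(disparity_to_depth(10.0))  # ~4.55 with the assumed parameters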
Example #3
def scan_advanced(scanner_object,
                  simulation_fps=24,
                  evd_file=None,
                  noise_mu=0.0,
                  evd_last_scan=True,
                  add_blender_mesh=False,
                  add_noisy_blender_mesh=False,
                  simulation_time=0.0,
                  laser_mirror_distance=0.05,
                  world_transformation=Matrix()):

    angle_resolution = scanner_object.generic_angle_resolution
    max_distance = scanner_object.generic_max_dist
    start_angle = scanner_object.generic_start_angle
    end_angle = scanner_object.generic_end_angle
    noise_mu = scanner_object.generic_noise_mu
    noise_sigma = scanner_object.generic_noise_sigma
    laser_angles = scanner_object.generic_laser_angles
    rotation_speed = scanner_object.generic_rotation_speed

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z
    """Standard Error model is a Gaussian Distribution"""
    model = gaussian_error_model.GaussianErrorModel(noise_mu, noise_sigma)
    if scanner_object.generic_advanced_error_model:
        """Advanced error model is a list of distance,mu,sigma tuples"""
        model = advanced_error_model.AdvancedErrorModel(
            scanner_object.generic_advanced_error_model)

    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution * math.pi / 180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1, 0, 0])
    yaxis = Vector([0, 1, 0])
    zaxis = Vector([0, 0, 1])

    rays = []
    ray_info = []

    angles = end_angle - start_angle
    steps_per_rotation = angles / angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation

    lines = (end_angle - start_angle) / angle_resolution

    laser_angles = angles_from_string(laser_angles)

    rays = []
    ray_info = []

    #Bad code???
    #steps_per_rotation = 360.0/angle_resolution
    #time_per_step = (1.0 / rotation_speed) / steps_per_rotation
    #angles = end_angle-start_angle

    lines = (end_angle - start_angle) / angle_resolution
    ray = Vector([0.0, 0.0, 0.0])
    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            ray.xyz = [0, 0, max_distance]
            rot_angle = 1e-6 + start_angle + float(
                line) * angle_resolution + 180.0
            timestamp = (
                (rot_angle - 180.0) / angle_resolution) * time_per_step
            rot_angle = rot_angle % 360.0
            ray_info.append([
                deg2rad(rot_angle),
                deg2rad(laser_angles[laser_idx]), timestamp
            ])

            rotator = Euler(
                [deg2rad(-laser_angles[laser_idx]),
                 deg2rad(rot_angle), 0.0])
            ray.rotate(rotator)
            rays.extend([ray[0], ray[1], ray[2]])

    returns = blensor.scan_interface.scan_rays(rays,
                                               max_distance,
                                               inv_scan_x=inv_scan_x,
                                               inv_scan_y=inv_scan_y,
                                               inv_scan_z=inv_scan_z)

    reusable_vector = Vector([0.0, 0.0, 0.0, 1.0])
    if len(laser_angles) != len(laser_noise):
        randomize_distance_bias(len(laser_angles), noise_mu, noise_sigma)
    vp = (world_transformation * reusable_vector).xyz

    for i in range(len(returns)):
        idx = returns[i][-1]

        # Calculate noise-free point.
        reusable_vector.xyzw = [
            returns[i][1], returns[i][2], returns[i][3], 1.0
        ]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1], returns[i][2], returns[i][3]]

        # Calculate noisy point.
        vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
        distance_noise = laser_noise[
            idx % len(laser_noise)] + model.drawErrorFromModel(vector_length)
        norm_vector = [
            v[0] / vector_length, v[1] / vector_length, v[2] / vector_length
        ]
        vector_length_noise = vector_length + distance_noise
        reusable_vector.xyzw = [
            norm_vector[0] * vector_length_noise,
            norm_vector[1] * vector_length_noise,
            norm_vector[2] * vector_length_noise, 1.0
        ]
        v_noise = (world_transformation * reusable_vector).xyz

        evd_storage.addEntry(timestamp=ray_info[idx][2],
                             yaw=(ray_info[idx][0] + math.pi) % (2 * math.pi),
                             pitch=ray_info[idx][1],
                             distance=vector_length,
                             distance_noise=vector_length_noise,
                             vp_x=vp[0],
                             vp_y=vp[1],
                             vp_z=vp[2],
                             x=vt[0],
                             y=vt[1],
                             z=vt[2],
                             x_noise=v_noise[0],
                             y_noise=v_noise[1],
                             z_noise=v_noise[2],
                             object_id=returns[i][4],
                             color=returns[i][5])

    current_angle = start_angle + float(float(int(lines)) * angle_resolution)

    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11],
                                               "Scan",
                                               world_transformation,
                                               buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 11:14],
                                               "NoisyScan",
                                               world_transformation,
                                               buffer=additional_data)

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, current_angle, scan_time
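
A minimal sketch of how a single ray direction is produced by the Euler rotation in the scan loop above, using Blender's mathutils directly; the two angles are illustrative values.

from math import radians
from mathutils import Vector, Euler

max_distance = 90.0  # illustrative
rot_angle = 30.0     # horizontal rotation in degrees, illustrative
laser_angle = -5.0   # vertical laser angle in degrees, illustrative

# Same construction as the loop: start along +Z at max_distance, then rotate
# by -laser_angle around X and by rot_angle around Y.
ray = Vector([0.0, 0.0, max_distance])
ray.rotate(Euler([radians(-laser_angle), radians(rot_angle), 0.0]))
print(ray)  # ray direction scaled to the maximum scanning distance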
Example #4
def scan_advanced(scanner_object, evd_file=None, 
                  evd_last_scan=True, 
                  timestamp = 0.0,
                  world_transformation=Matrix()):


    # threshold for comparing projector and camera rays
    thresh = 0.01

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z 

    x_multiplier = -1.0 if inv_scan_x else 1.0
    y_multiplier = -1.0 if inv_scan_y else 1.0
    z_multiplier = -1.0 if inv_scan_z else 1.0

    start_time = time.time()

    max_distance = scanner_object.kinect_max_dist
    min_distance = scanner_object.kinect_min_dist
    add_blender_mesh = scanner_object.add_scan_mesh
    add_noisy_blender_mesh = scanner_object.add_noise_scan_mesh
    noise_mu = scanner_object.kinect_noise_mu
    noise_sigma = scanner_object.kinect_noise_sigma                
    noise_scale = scanner_object.kinect_noise_scale
    noise_smooth = scanner_object.kinect_noise_smooth                
    res_x = scanner_object.kinect_xres 
    res_y = scanner_object.kinect_yres
    flength = scanner_object.kinect_flength
    WINDOW_INLIER_DISTANCE = scanner_object.kinect_inlier_distance


    if res_x < 1 or res_y < 1:
        raise ValueError("Resolution must be > 0")

    pixel_width = 0.0078
    pixel_height = 0.0078

    cx = float(res_x) /2.0
    cy = float(res_y) /2.0 

    evd_buffer = []

    rays = [0.0]*res_y*res_x*6
    ray_info = [[0.0,0.0,0.0]]*res_y*res_x

    baseline = Vector([0.075,0.0,0.0]) #Kinect has a baseline of 7.5 centimeters


    
    rayidx=0
    ray = Vector([0.0,0.0,0.0])
    """Calculate the rays from the projector"""
    for y in range(res_y):
        for x in range(res_x):
            """Calculate a vector that originates at the principal point
               and points to the pixel in the sensor. This vector is then
               scaled to the maximum scanning distance 
            """ 

            physical_x = float(x-cx) * pixel_width
            physical_y = float(y-cy) * pixel_height
            physical_z = -float(flength)

            #ray = Vector([physical_x, physical_y, physical_z])
            ray.xyz=[physical_x, physical_y, physical_z]
            ray.normalize()
            final_ray = max_distance*ray
            rays[rayidx*6] = final_ray[0]
            rays[rayidx*6+1] = final_ray[1]
            rays[rayidx*6+2] = final_ray[2]
            rays[rayidx*6+3] = baseline.x
            rays[rayidx*6+4] = baseline.y
            rays[rayidx*6+5] = baseline.z

            """ pitch and yaw are added for completeness, normally they are
                not provided by a ToF Camera but can be derived 
                from the pixel position and the camera parameters.
            """
            yaw = math.atan(physical_x/flength)
            pitch = math.atan(physical_y/flength)
            ray_info[rayidx][0] = yaw
            ray_info[rayidx][1] = pitch
            ray_info[rayidx][2] = timestamp

            rayidx += 1

    """ Max distance is increased because the kinect is limited by 4m
        _normal distance_ to the imaging plane, We don't need shading in the
        first pass. 
        #TODO: the shading requirements might change when transmission
        is implemented (the rays might pass through glass)
    """
    returns = blensor.scan_interface.scan_rays(rays, 2.0*max_distance, True,True,True,True)

    camera_rays = []
    projector_ray_index = -1 * numpy.ones(len(returns), dtype=numpy.uint32)

    kinect_image = numpy.zeros((res_x*res_y,16))
    kinect_image[:,3:11] = float('NaN')
    kinect_image[:,11] = -1.0
    """Calculate the rays from the camera to the hit points of the projector rays"""
    for i in range(len(returns)):
        idx = returns[i][-1]
        kinect_image[idx,12:15] = returns[i][5]

        if returns[i][0] < max_distance:
          camera_rays.extend([returns[i][1]+baseline.x, returns[i][2]+baseline.y, 
                            returns[i][3]+baseline.z])
          projector_ray_index[i] = idx


    camera_returns = blensor.scan_interface.scan_rays(camera_rays, 2*max_distance, False,False,False)
    
    verts = []
    verts_noise = []
    evd_storage = evd.evd_file(evd_file, res_x, res_y, max_distance)

    all_quantized_disparities = numpy.empty(res_x*res_y)
    all_quantized_disparities[:] = INVALID_DISPARITY
    
    disparity_weight = numpy.empty(res_x*res_y)
    disparity_weight[:] = INVALID_DISPARITY

    all_quantized_disp_mat = all_quantized_disparities.reshape(res_y,res_x)
    disp_weight_mat = disparity_weight.reshape(res_y,res_x)

    weights = numpy.array([1.0/float((1.2*x)**2+(1.2*y)**2) if x!=0 or y!=0 else 1.0 for x in range(-4,5) for y in range (-4,5)]).reshape((9,9))
    
    """Build a quantized disparity map"""
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1] 
        projector_idx = projector_ray_index[idx] # Get the index of the original ray

        if (abs(camera_rays[idx*3]-camera_returns[i][1]) < thresh and
            abs(camera_rays[idx*3+1]-camera_returns[i][2]) < thresh and
            abs(camera_rays[idx*3+2]-camera_returns[i][3]) < thresh and
            abs(camera_returns[i][3]) <= max_distance and
            abs(camera_returns[i][3]) >= min_distance):
            """The ray hit the projected ray, so this is a valid measurement"""
            projector_point = get_uv_from_idx(projector_idx, res_x,res_y)

            camera_x = get_pixel_from_world(camera_rays[idx*3],camera_rays[idx*3+2],
                                   flength/pixel_width) + random.gauss(noise_mu, noise_sigma)

            camera_y = get_pixel_from_world(camera_rays[idx*3+1],camera_rays[idx*3+2],
                                   flength/pixel_width)

            """ Kinect calculates the disparity with an accuracy of 1/8 pixel"""

            camera_x_quantized = round(camera_x*8.0)/8.0
            
            #I don't know if this accurately represents the kinect 
            camera_y_quantized = round(camera_y*8.0)/8.0 

            disparity_quantized = camera_x_quantized + projector_point[0]
            if projector_idx >= 0: 
              all_quantized_disparities[projector_idx] = disparity_quantized
        
    processed_disparities = numpy.empty(res_x*res_y)
    fast_9x9_window(all_quantized_disparities, res_x, res_y, processed_disparities, noise_smooth, noise_scale)
    
    """We reuse the vector objects to spare us the object creation every
       time
    """
    v = Vector([0.0,0.0,0.0])
    vn = Vector([0.0,0.0,0.0])
    """Check if the rays of the camera meet with the rays of the projector and
       add them as valid returns if they do"""
    image_idx = 0
    
    
    for i in range(len(camera_returns)):
        idx = camera_returns[i][-1] 
        projector_idx = projector_ray_index[idx] # Get the index of the original ray
        camera_x,camera_y = get_uv_from_idx(projector_idx, res_x,res_y)

        if projector_idx >= 0:
          disparity_quantized = processed_disparities[projector_idx] 
        else:
          disparity_quantized = INVALID_DISPARITY
        
        if disparity_quantized < INVALID_DISPARITY and disparity_quantized != 0.0:
            disparity_quantized = -disparity_quantized
            Z_quantized = (flength*(baseline.x))/(disparity_quantized*pixel_width)
            X_quantized = baseline.x+Z_quantized*camera_x*pixel_width/flength
            Y_quantized = baseline.y+Z_quantized*camera_y*pixel_width/flength
            Z_quantized = -(Z_quantized+baseline.z)
            
            v.xyz=[x_multiplier*(returns[idx][1]+baseline.x),\
                   y_multiplier*(returns[idx][2]+baseline.y),\
                   z_multiplier*(returns[idx][3]+baseline.z)]
            vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)

            vt = (world_transformation * v.to_4d()).xyz
            verts.append ( vt )

            vn.xyz = [x_multiplier*X_quantized,y_multiplier*Y_quantized,z_multiplier*Z_quantized]
            vector_length_noise = vn.magnitude
            
            #TODO@mgschwan: prevent object creation here too
            v_noise = (world_transformation * vn.to_4d()).xyz 
            verts_noise.append( v_noise )
             
            kinect_image[projector_idx] = [ray_info[projector_idx][2], 
               0.0, 0.0, -returns[idx][3], -Z_quantized, vt[0], 
               vt[1], vt[2], v_noise[0], v_noise[1], v_noise[2], 
               returns[idx][4], returns[idx][5][0], returns[idx][5][1],
               returns[idx][5][2],projector_idx]
            image_idx += 1
        else:
          """Occlusion"""
          pass

    for e in kinect_image:
      evd_storage.addEntry(timestamp = e[0], yaw = e[1], pitch=e[2], 
        distance=e[3], distance_noise=e[4], x=e[5], y=e[6], z=e[7], 
        x_noise=e[8], y_noise=e[9], z_noise=e[10], object_id=e[11], 
        color=[e[12],e[13],e[14]], idx=e[15])
        

    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation)            

    bpy.context.scene.update()  
    
    end_time = time.time()
    scan_time = end_time-start_time
    print ("Elapsed time: %.3f"%(scan_time))

    return True, 0.0, scan_time
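
Example #4 hard-codes pixel_width = pixel_height = 0.0078, while Example #2 derives the pixel pitch from the field of view. A small sketch of that FOV-based form, with an illustrative field of view and focal length (the real values come from the scanner parameters):

import math

def pixel_pitch_from_fov(fov_deg, flength_mm, res):
    """Pixel pitch in mm per pixel: tan(fov/2) * flength / (res/2),
    as in the FOV-based Kinect variant (Example #2)."""
    return (math.tan(math.radians(fov_deg / 2.0)) * flength_mm) / max(1.0, res / 2.0)

print(pixel_pitch_from_fov(57.0, 4.73, 640))  # roughly 0.008 mm, close to the 0.0078 default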
Example #5
def scan_advanced(scanner_object,
                  rotation_speed=25.0,
                  simulation_fps=24,
                  angle_resolution=0.5,
                  max_distance=90,
                  evd_file=None,
                  noise_mu=0.0,
                  noise_sigma=0.03,
                  start_angle=-35,
                  end_angle=50,
                  evd_last_scan=True,
                  add_blender_mesh=False,
                  add_noisy_blender_mesh=False,
                  simulation_time=0.0,
                  laser_mirror_distance=0.05,
                  world_transformation=Matrix()):
    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z

    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution * math.pi / 180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1, 0, 0])
    yaxis = Vector([0, 1, 0])
    zaxis = Vector([0, 0, 1])

    rays = []
    ray_info = []

    angles = end_angle - start_angle
    steps_per_rotation = angles / angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation

    lines = (end_angle - start_angle) / angle_resolution

    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            current_angle = start_angle + float(line) * angles / float(lines)
            [ray, origin, laser_angle] = calculateRay(laser_angles[laser_idx],
                                                      deg2rad(current_angle),
                                                      laser_mirror_distance)
            #TODO: Use the origin to cast the ray. Requires changes to the blender patch
            rot_angle = 1e-6 + current_angle + 180.0
            timestamp = (
                (rot_angle - 180.0) / angle_resolution) * time_per_step
            rot_angle = rot_angle % 360.0
            ray_info.append([deg2rad(rot_angle), laser_angle, timestamp])

            rays.extend([ray[0], ray[1], ray[2]])

    returns = blensor.scan_interface.scan_rays(rays,
                                               max_distance,
                                               inv_scan_x=inv_scan_x,
                                               inv_scan_y=inv_scan_y,
                                               inv_scan_z=inv_scan_z)

    reusable_vector = Vector([0.0, 0.0, 0.0, 1.0])
    vp = (world_transformation * reusable_vector).xyz

    for i in range(len(returns)):
        idx = returns[i][-1]

        # Calculate noise-free point.
        reusable_vector.xyzw = [
            returns[i][1], returns[i][2], returns[i][3], 1.0
        ]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1], returns[i][2], returns[i][3]]

        # Calculate noisy point.
        distance_noise = laser_noise[idx % len(laser_noise)] + random.gauss(
            noise_mu, noise_sigma)
        vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
        norm_vector = [
            v[0] / vector_length, v[1] / vector_length, v[2] / vector_length
        ]
        vector_length_noise = vector_length + distance_noise
        reusable_vector.xyzw = [
            norm_vector[0] * vector_length_noise,
            norm_vector[1] * vector_length_noise,
            norm_vector[2] * vector_length_noise, 1.0
        ]
        v_noise = (world_transformation * reusable_vector).xyz

        evd_storage.addEntry(timestamp=ray_info[idx][2],
                             yaw=(ray_info[idx][0] + math.pi) % (2 * math.pi),
                             pitch=ray_info[idx][1],
                             distance=vector_length,
                             distance_noise=vector_length_noise,
                             vp_x=vp[0],
                             vp_y=vp[1],
                             vp_z=vp[2],
                             x=vt[0],
                             y=vt[1],
                             z=vt[2],
                             x_noise=v_noise[0],
                             y_noise=v_noise[1],
                             z_noise=v_noise[2],
                             object_id=returns[i][4],
                             color=returns[i][5])

    current_angle = start_angle + float(float(int(lines)) * angle_resolution)

    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11],
                                               "Scan",
                                               world_transformation,
                                               buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 11:14],
                                               "NoisyScan",
                                               world_transformation,
                                               buffer=additional_data)

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, current_angle, scan_time
Example #6
def scan_advanced(scanner_object,
                  rotation_speed=10.0,
                  simulation_fps=24,
                  angle_resolution=0.1728,
                  max_distance=120,
                  evd_file=None,
                  noise_mu=0.0,
                  noise_sigma=0.03,
                  start_angle=0.0,
                  end_angle=360.0,
                  evd_last_scan=True,
                  add_blender_mesh=False,
                  add_noisy_blender_mesh=False,
                  frame_time=(1.0 / 24.0),
                  simulation_time=0.0,
                  world_transformation=Matrix()):

    scanner_angles = laser_angles
    scanner_noise = laser_noise
    if scanner_object.velodyne_model == BLENSOR_VELODYNE_HDL32E:
        scanner_angles = laser_angles_32

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z

    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution * math.pi / 180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1, 0, 0])
    yaxis = Vector([0, 1, 0])
    zaxis = Vector([0, 0, 1])

    rays = []
    ray_info = []

    steps_per_rotation = 360.0 / angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation
    angles = end_angle - start_angle

    lines = (end_angle - start_angle) / angle_resolution
    ray = Vector([0.0, 0.0, 0.0])
    for line in range(int(lines)):
        for laser_idx in range(len(scanner_angles)):
            ray.xyz = [0, 0, max_distance]
            rot_angle = 1e-6 + start_angle + float(
                line) * angle_resolution + 180.0
            timestamp = (
                (rot_angle - 180.0) / angle_resolution) * time_per_step
            rot_angle = rot_angle % 360.0
            ray_info.append([
                deg2rad(rot_angle),
                deg2rad(scanner_angles[laser_idx]), timestamp
            ])

            rotator = Euler(
                [deg2rad(-scanner_angles[laser_idx]),
                 deg2rad(rot_angle), 0.0])
            ray.rotate(rotator)
            rays.extend([ray[0], ray[1], ray[2]])

    returns = blensor.scan_interface.scan_rays(rays,
                                               max_distance,
                                               inv_scan_x=inv_scan_x,
                                               inv_scan_y=inv_scan_y,
                                               inv_scan_z=inv_scan_z)

    reusable_vector = Vector([0.0, 0.0, 0.0, 1.0])
    vp = (world_transformation * reusable_vector).xyz

    for i in range(len(returns)):
        idx = returns[i][-1]

        # Calculate noise-free point.
        reusable_vector.xyzw = (returns[i][1], returns[i][2], returns[i][3],
                                1.0)
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1], returns[i][2], returns[i][3]]

        # Calculate noisy point.
        distance_noise = laser_noise[idx % len(scanner_angles)] + random.gauss(
            noise_mu, noise_sigma)
        vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
        norm_vector = [
            v[0] / vector_length, v[1] / vector_length, v[2] / vector_length
        ]
        vector_length_noise = vector_length + distance_noise
        reusable_vector.xyzw = [
            norm_vector[0] * vector_length_noise,
            norm_vector[1] * vector_length_noise,
            norm_vector[2] * vector_length_noise, 1.0
        ]
        v_noise = (world_transformation * reusable_vector).xyz

        evd_storage.addEntry(timestamp=ray_info[idx][2],
                             yaw=(ray_info[idx][0] + math.pi) % (2 * math.pi),
                             pitch=ray_info[idx][1],
                             distance=vector_length,
                             distance_noise=vector_length_noise,
                             vp_x=vp[0],
                             vp_y=vp[1],
                             vp_z=vp[2],
                             x=vt[0],
                             y=vt[1],
                             z=vt[2],
                             x_noise=v_noise[0],
                             y_noise=v_noise[1],
                             z_noise=v_noise[2],
                             object_id=returns[i][4],
                             color=returns[i][5])

    current_angle = start_angle + float(float(int(lines)) * angle_resolution)

    pre_write_time = time.time()

    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 8:11],
                                               "Scan",
                                               world_transformation,
                                               buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:, 11:14],
                                               "NoisyScan",
                                               world_transformation,
                                               buffer=additional_data)

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = pre_write_time - start_time
    total_time = end_time - start_time
    print("Elapsed time: %.3f (scan: %.3f)" % (total_time, scan_time))

    return True, current_angle, scan_time
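
A usage sketch for the Velodyne variant above, assuming it is the scan_advanced provided by blensor.blendodyne (the module path, object name and output path are assumptions); the keyword arguments match the defaults of Example #6.

import bpy
import blensor.blendodyne  # assumed module path for the Velodyne scanner

scanner = bpy.data.objects["Camera"]  # assumed scanner object in the scene
ok, last_angle, duration = blensor.blendodyne.scan_advanced(
    scanner,
    rotation_speed=10.0,
    angle_resolution=0.1728,
    max_distance=120,
    start_angle=0.0, end_angle=360.0,
    noise_mu=0.0, noise_sigma=0.03,
    evd_file="/tmp/velodyne_scan.evd",  # hypothetical output file
    add_blender_mesh=False,
    add_noisy_blender_mesh=True)
print("scan stopped at %.2f degrees after %.3f s" % (last_angle, duration))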
Example #7
def scan_advanced(scanner_object, rotation_speed=25.0, simulation_fps=24,
                  angle_resolution=0.5, max_distance=90, evd_file=None,
                  noise_mu=0.0, noise_sigma=0.03, start_angle=-35,
                  end_angle=50, evd_last_scan=True, add_blender_mesh=False,
                  add_noisy_blender_mesh=False, simulation_time=0.0,
                  laser_mirror_distance=0.05, world_transformation=Matrix()):
    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z   

    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution*math.pi/180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1,0,0])
    yaxis = Vector([0,1,0])
    zaxis = Vector([0,0,1])

    rays = []
    ray_info = []

    angles = end_angle-start_angle
    steps_per_rotation = angles/angle_resolution
    time_per_step = (1.0/rotation_speed) / steps_per_rotation

    lines = (end_angle-start_angle)/angle_resolution

    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            current_angle = start_angle + float(line)*angles/float(lines)
            [ray, origin, laser_angle] = calculateRay(laser_angles[laser_idx], deg2rad(current_angle), laser_mirror_distance)
            #TODO: Use the origin to cast the ray. Requires changes to the blender patch
            rot_angle = 1e-6 + current_angle + 180.0
            timestamp = ( (rot_angle-180.0)/angle_resolution) * time_per_step 
            rot_angle = rot_angle%360.0
            ray_info.append([deg2rad(rot_angle), laser_angle, timestamp])
            
            rays.extend([ray[0],ray[1],ray[2]])


    returns = blensor.scan_interface.scan_rays(rays, max_distance, inv_scan_x = inv_scan_x, inv_scan_y = inv_scan_y, inv_scan_z = inv_scan_z)

    reusable_vector = Vector([0.0,0.0,0.0,0.0])
    for i in range(len(returns)):
        idx = returns[i][-1]
        reusable_vector.xyzw = [returns[i][1],returns[i][2],returns[i][3],1.0]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1],returns[i][2],returns[i][3]]

        distance_noise =  laser_noise[idx%len(laser_noise)] + random.gauss(noise_mu, noise_sigma) 
        vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)
        norm_vector = [v[0]/vector_length, v[1]/vector_length, v[2]/vector_length]
        vector_length_noise = vector_length+distance_noise
        reusable_vector.xyzw = [norm_vector[0]*vector_length_noise, norm_vector[1]*vector_length_noise, norm_vector[2]*vector_length_noise,1.0]
        v_noise = (world_transformation * reusable_vector).xyz

        evd_storage.addEntry(timestamp = ray_info[idx][2], yaw =(ray_info[idx][0]+math.pi)%(2*math.pi), pitch=ray_info[idx][1], distance=vector_length, distance_noise=vector_length_noise, x=vt[0], y=vt[1], z=vt[2], x_noise=v_noise[0], y_noise=v_noise[1], z_noise=v_noise[2], object_id=returns[i][4], color=returns[i][5])

    current_angle = start_angle+float(float(int(lines))*angle_resolution)
            
    if evd_file:
        evd_storage.appendEvdFile()

    if not evd_storage.isEmpty():
        scan_data = numpy.array(evd_storage.buffer)
        additional_data = None
        if scanner_object.store_data_in_mesh:
            additional_data = evd_storage.buffer

        if add_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:,5:8], "Scan", world_transformation, buffer=additional_data)

        if add_noisy_blender_mesh:
            mesh_utils.add_mesh_from_points_tf(scan_data[:,8:11], "NoisyScan", world_transformation, buffer=additional_data) 

        bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time-start_time
    print ("Elapsed time: %.3f"%(scan_time))

    return True, current_angle, scan_time
Example #8
def scan_advanced(scanner_object,
                  simulation_fps=24,
                  evd_file=None,
                  noise_mu=0.0,
                  evd_last_scan=True,
                  add_blender_mesh=False,
                  add_noisy_blender_mesh=False,
                  simulation_time=0.0,
                  laser_mirror_distance=0.05,
                  world_transformation=Matrix()):

    angle_resolution = scanner_object.generic_angle_resolution
    max_distance = scanner_object.generic_max_dist
    start_angle = scanner_object.generic_start_angle
    end_angle = scanner_object.generic_end_angle
    noise_mu = scanner_object.generic_noise_mu
    noise_sigma = scanner_object.generic_noise_sigma
    laser_angles = scanner_object.generic_laser_angles
    rotation_speed = scanner_object.generic_rotation_speed

    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution * math.pi / 180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1, 0, 0])
    yaxis = Vector([0, 1, 0])
    zaxis = Vector([0, 0, 1])

    rays = []
    ray_info = []

    angles = end_angle - start_angle
    steps_per_rotation = angles / angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation

    lines = (end_angle - start_angle) / angle_resolution

    laser_angles = angles_from_string(laser_angles)

    rays = []
    ray_info = []

    steps_per_rotation = 360.0 / angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation
    angles = end_angle - start_angle

    lines = (end_angle - start_angle) / angle_resolution
    ray = Vector([0.0, 0.0, 0.0])
    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            ray.xyz = [0, 0, max_distance]
            rot_angle = 1e-6 + start_angle + float(
                line) * angle_resolution + 180.0
            timestamp = (
                (rot_angle - 180.0) / angle_resolution) * time_per_step
            rot_angle = rot_angle % 360.0
            ray_info.append([
                deg2rad(rot_angle),
                deg2rad(laser_angles[laser_idx]), timestamp
            ])

            rotator = Euler(
                [deg2rad(-laser_angles[laser_idx]),
                 deg2rad(rot_angle), 0.0])
            ray.rotate(rotator)
            rays.extend([ray[0], ray[1], ray[2]])

    returns = blensor.scan_interface.scan_rays(rays, max_distance)

    verts = []
    verts_noise = []

    reusable_vector = Vector([0.0, 0.0, 0.0, 0.0])
    if len(laser_angles) != len(laser_noise):
        randomize_distance_bias(len(laser_angles), noise_mu, noise_sigma)

    for i in range(len(returns)):
        idx = returns[i][-1]
        reusable_vector.xyzw = [
            returns[i][1], returns[i][2], returns[i][3], 1.0
        ]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1], returns[i][2], returns[i][3]]
        verts.append(vt)

        distance_noise = laser_noise[idx % len(laser_noise)] + random.gauss(
            noise_mu, noise_sigma)
        vector_length = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
        norm_vector = [
            v[0] / vector_length, v[1] / vector_length, v[2] / vector_length
        ]
        vector_length_noise = vector_length + distance_noise
        reusable_vector.xyzw = [
            norm_vector[0] * vector_length_noise,
            norm_vector[1] * vector_length_noise,
            norm_vector[2] * vector_length_noise, 1.0
        ]
        v_noise = (world_transformation * reusable_vector).xyz
        verts_noise.append(v_noise)

        evd_storage.addEntry(timestamp=ray_info[idx][2],
                             yaw=(ray_info[idx][0] + math.pi) % (2 * math.pi),
                             pitch=ray_info[idx][1],
                             distance=vector_length,
                             distance_noise=vector_length_noise,
                             x=vt[0],
                             y=vt[1],
                             z=vt[2],
                             x_noise=v_noise[0],
                             y_noise=v_noise[1],
                             z_noise=v_noise[2],
                             object_id=returns[i][4],
                             color=returns[i][5])

    current_angle = start_angle + float(float(int(lines)) * angle_resolution)

    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan",
                                           world_transformation)

    bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, current_angle, scan_time
Example #9
def scan_advanced(scanner_object, rotation_speed=10.0, simulation_fps=24,
                  angle_resolution=0.1728, max_distance=120, evd_file=None,
                  noise_mu=0.0, noise_sigma=0.03, start_angle=0.0,
                  end_angle=360.0, evd_last_scan=True, add_blender_mesh=False,
                  add_noisy_blender_mesh=False, frame_time=(1.0 / 24.0),
                  simulation_time=0.0, world_transformation=Matrix()):
    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution*math.pi/180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1,0,0])
    yaxis = Vector([0,1,0])
    zaxis = Vector([0,0,1])

    rays = []
    ray_info = []

    steps_per_rotation = 360.0/angle_resolution
    time_per_step = (1.0 / rotation_speed) / steps_per_rotation
    angles = end_angle-start_angle
  
    lines = (end_angle-start_angle)/angle_resolution
    ray = Vector([0.0,0.0,0.0])
    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            ray.xyz = [0,0,max_distance]
            rot_angle = 1e-6 + start_angle+float(line)*angle_resolution + 180.0
            timestamp = ( (rot_angle-180.0)/angle_resolution) * time_per_step 
            rot_angle = rot_angle%360.0
            ray_info.append([deg2rad(rot_angle), deg2rad(laser_angles[laser_idx]), timestamp])
            
            rotator = Euler( [deg2rad(-laser_angles[laser_idx]), deg2rad(rot_angle), 0.0] )
            ray.rotate( rotator )
            rays.extend([ray[0],ray[1],ray[2]])

    returns = blensor.scan_interface.scan_rays(rays, max_distance)
    verts = []
    verts_noise = []

#    for idx in range((len(rays)//3)):
    
    reusable_4dvector = Vector([0.0,0.0,0.0,0.0])
    
    for i in range(len(returns)):
        idx = returns[i][-1]
        reusable_4dvector.xyzw = (returns[i][1],returns[i][2],returns[i][3],1.0)
        vt = (world_transformation * reusable_4dvector).xyz
        v = [returns[i][1],returns[i][2],returns[i][3]]
        verts.append ( vt )

        distance_noise =  laser_noise[idx%len(laser_noise)] + random.gauss(noise_mu, noise_sigma) 
        vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)
        norm_vector = [v[0]/vector_length, v[1]/vector_length, v[2]/vector_length]
        vector_length_noise = vector_length+distance_noise
        reusable_4dvector.xyzw=[norm_vector[0]*vector_length_noise, norm_vector[1]*vector_length_noise, norm_vector[2]*vector_length_noise,1.0]
        v_noise = (world_transformation * reusable_4dvector).xyz
        verts_noise.append( v_noise )

        evd_storage.addEntry(timestamp = ray_info[idx][2], yaw =(ray_info[idx][0]+math.pi)%(2*math.pi), pitch=ray_info[idx][1], distance=vector_length, distance_noise=vector_length_noise, x=vt[0], y=vt[1], z=vt[2], x_noise=v_noise[0], y_noise=v_noise[1], z_noise=v_noise[2], object_id=returns[i][4], color=returns[i][5])


    current_angle = start_angle+float(float(int(lines))*angle_resolution)

    pre_write_time = time.time()
            
    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation) 
        
    bpy.context.scene.update()

    end_time = time.time()
    scan_time = pre_write_time-start_time
    total_time = end_time-start_time
    print ("Elapsed time: %.3f (scan: %.3f)"%(total_time, scan_time))

    return True, current_angle, scan_time
Example #10
def scan_advanced(scanner_object, simulation_fps=24, evd_file=None,
                  noise_mu=0.0, evd_last_scan=True, add_blender_mesh=False,
                  add_noisy_blender_mesh=False, simulation_time=0.0,
                  laser_mirror_distance=0.05, world_transformation=Matrix()):
    
    
    angle_resolution=scanner_object.generic_angle_resolution
    max_distance=scanner_object.generic_max_dist
    start_angle=scanner_object.generic_start_angle
    end_angle=scanner_object.generic_end_angle
    noise_mu = scanner_object.generic_noise_mu
    noise_sigma=scanner_object.generic_noise_sigma
    laser_angles = scanner_object.generic_laser_angles
    rotation_speed = scanner_object.generic_rotation_speed

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z    

    """Standard Error model is a Gaussian Distribution"""
    model = gaussian_error_model.GaussianErrorModel(noise_mu, noise_sigma)
    if scanner_object.generic_advanced_error_model:
      """Advanced error model is a list of distance,mu,sigma tuples"""
      model = advanced_error_model.AdvancedErrorModel(scanner_object.generic_advanced_error_model)


    start_time = time.time()

    current_time = simulation_time
    delta_rot = angle_resolution*math.pi/180

    evd_storage = evd.evd_file(evd_file)

    xaxis = Vector([1,0,0])
    yaxis = Vector([0,1,0])
    zaxis = Vector([0,0,1])

    rays = []
    ray_info = []

    angles = end_angle-start_angle
    steps_per_rotation = angles/angle_resolution
    time_per_step = (1.0/rotation_speed) / steps_per_rotation

    lines = (end_angle-start_angle)/angle_resolution

    laser_angles = angles_from_string(laser_angles)


    rays = []
    ray_info = []

    #Bad code???
    #steps_per_rotation = 360.0/angle_resolution
    #time_per_step = (1.0 / rotation_speed) / steps_per_rotation
    #angles = end_angle-start_angle
  
    lines = (end_angle-start_angle)/angle_resolution
    ray = Vector([0.0,0.0,0.0])
    for line in range(int(lines)):
        for laser_idx in range(len(laser_angles)):
            ray.xyz = [0,0,max_distance]
            rot_angle = 1e-6 + start_angle+float(line)*angle_resolution + 180.0
            timestamp = ( (rot_angle-180.0)/angle_resolution) * time_per_step 
            rot_angle = rot_angle%360.0
            ray_info.append([deg2rad(rot_angle), deg2rad(laser_angles[laser_idx]), timestamp])
            
            rotator = Euler( [deg2rad(-laser_angles[laser_idx]), deg2rad(rot_angle), 0.0] )
            ray.rotate( rotator )
            rays.extend([ray[0],ray[1],ray[2]])


    returns = blensor.scan_interface.scan_rays(rays, max_distance, inv_scan_x = inv_scan_x, inv_scan_y = inv_scan_y, inv_scan_z = inv_scan_z)

    verts = []
    verts_noise = []

    reusable_vector = Vector([0.0,0.0,0.0,0.0])
    if len(laser_angles) != len(laser_noise):
      randomize_distance_bias(len(laser_angles), noise_mu,noise_sigma)
      
    for i in range(len(returns)):
        idx = returns[i][-1]
        reusable_vector.xyzw = [returns[i][1],returns[i][2],returns[i][3],1.0]
        vt = (world_transformation * reusable_vector).xyz
        v = [returns[i][1],returns[i][2],returns[i][3]]
        verts.append ( vt )

        vector_length = math.sqrt(v[0]**2+v[1]**2+v[2]**2)
        distance_noise =  laser_noise[idx%len(laser_noise)] + model.drawErrorFromModel(vector_length) 
        norm_vector = [v[0]/vector_length, v[1]/vector_length, v[2]/vector_length]
        vector_length_noise = vector_length+distance_noise
        reusable_vector.xyzw = [norm_vector[0]*vector_length_noise, norm_vector[1]*vector_length_noise, norm_vector[2]*vector_length_noise,1.0]
        v_noise = (world_transformation * reusable_vector).xyz
        verts_noise.append( v_noise )

        evd_storage.addEntry(timestamp = ray_info[idx][2], yaw =(ray_info[idx][0]+math.pi)%(2*math.pi), pitch=ray_info[idx][1], distance=vector_length, distance_noise=vector_length_noise, x=vt[0], y=vt[1], z=vt[2], x_noise=v_noise[0], y_noise=v_noise[1], z_noise=v_noise[2], object_id=returns[i][4], color=returns[i][5])


    current_angle = start_angle+float(float(int(lines))*angle_resolution)
            
    if evd_file:
        evd_storage.appendEvdFile()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts_noise, "NoisyScan", world_transformation) 

    bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time-start_time
    print ("Elapsed time: %.3f"%(scan_time))

    return True, current_angle, scan_time
Example #11
def scan_advanced(scanner_object,
                  max_distance=120,
                  filename=None,
                  add_blender_mesh=False,
                  world_transformation=Matrix()):
    start_time = time.time()

    inv_scan_x = scanner_object.inv_scan_x
    inv_scan_y = scanner_object.inv_scan_y
    inv_scan_z = scanner_object.inv_scan_z

    x_multiplier = -1.0 if inv_scan_x else 1.0
    y_multiplier = -1.0 if inv_scan_y else 1.0
    z_multiplier = -1.0 if inv_scan_z else 1.0

    add_noisy_blender_mesh = scanner_object.add_noise_scan_mesh

    bpy.context.scene.render.resolution_percentage = 100
    bpy.context.scene.render.use_antialiasing = False

    width = bpy.context.scene.render.resolution_x
    height = bpy.context.scene.render.resolution_y
    cx = float(width) / 2.0
    cy = float(height) / 2.0

    if bpy.context.scene.camera.data.lens_unit == "MILLIMETERS":
        flength = bpy.context.scene.camera.data.lens
    else:
        print("Lens unit has to be millimeters")
        return False, 0.0, 0.0

    cam_name = bpy.context.scene.camera.name
    cam = bpy.data.cameras[cam_name]
    # TODO
    # - This assumes camera sensor is wider than it is taller.
    # - Use the sensor mode to figure out which dimensions to use.
    sensor_size = cam.sensor_width
    pixel_per_mm = blensor.globals.getPixelPerMillimeter(
        width, height, sensor_size)
    focal_length = flength * pixel_per_mm

    bpy.ops.render.render()

    zbuffer = blensorintern.copy_zbuf(bpy.data.images["Render Result"])
    depthmap = [0.0] * len(zbuffer)

    verts = []

    evd_storage = evd.evd_file(filename,
                               width,
                               height,
                               max_distance,
                               output_image=False,
                               output_noisy=True,
                               append_frame_counter=False)

    reusable_vector = Vector([0.0, 0.0, 0.0, 0.0])
    for idx in range(len(zbuffer)):
        x = float(idx % width)
        y = float(idx // width)
        dx = x - cx
        dy = y - cy

        ddist = math.sqrt(dx**2 + dy**2)

        world_ddist = (ddist * zbuffer[idx]) / focal_length

        object_distance = math.sqrt(world_ddist**2 + zbuffer[idx]**2)

        if object_distance < max_distance:
            depthmap[idx] = object_distance

        if add_blender_mesh or add_noisy_blender_mesh:
            if object_distance < max_distance:
                Z = -zbuffer[idx]
                X = -(Z * dx) / focal_length
                Y = -(Z * dy) / focal_length
                reusable_vector.xyzw = [X, Y, Z, 1.0]
                vt = (world_transformation * reusable_vector).xyz
                verts.append((x_multiplier * vt[0], y_multiplier * vt[1],
                              z_multiplier * vt[2]))

    # Update evd storage for just the depth data
    for idx, val in enumerate(depthmap):
        evd_storage.addEntry(distance=val, distance_noise=val, idx=idx)

    if filename:
        # save using evd file pipeline
        evd_storage.appendEvdFile()

        # save an additional copy using the original method
        #fh = open(filename+".backup", "w")
        #fh.buffer.write(struct.pack("ii",width,height))
        #for idx in range( width*height ):
        #    fh.buffer.write(struct.pack("d", depthmap[idx]))
        #fh.close()

    if add_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "Scan", world_transformation)

    if add_noisy_blender_mesh:
        mesh_utils.add_mesh_from_points_tf(verts, "NoisyScan",
                                           world_transformation)

    bpy.context.scene.update()

    end_time = time.time()
    scan_time = end_time - start_time
    print("Elapsed time: %.3f" % (scan_time))

    return True, 0.0, scan_time
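
The per-pixel back-projection used in the depth-map variant above, isolated as a small helper for reference. focal_length_px is the focal length already converted to pixels (flength * pixel_per_mm), as in the code; the values in the example call are illustrative.

def backproject_pixel(dx, dy, zbuf_value, focal_length_px):
    """Back-project a pixel offset (dx, dy) from the principal point and its
    z-buffer value into camera space, mirroring X = -(Z * dx) / focal_length,
    Y = -(Z * dy) / focal_length with Z = -zbuffer[idx] from the loop above."""
    Z = -zbuf_value
    X = -(Z * dx) / focal_length_px
    Y = -(Z * dy) / focal_length_px
    return X, Y, Z

print(backproject_pixel(10.0, 5.0, 3.2, 600.0))  # illustrative: 10 px right, 5 px down, 3.2 units deep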