Example #1
def getDataset(dataset):
    if (dataset == 0):
        joint_data = ld.get_joint("joint/train_joint0")
        lidar_data = ld.get_lidar("lidar/train_lidar0")
    #    rgb_data = ld.get_rgb("cam/RGB_0")
    #    depth_data = ld.get_depth("cam/DEPTH_0")
    #    exIR_RGB = ld.getExtrinsics_IR_RGB()
    #    IRCalib = ld.getIRCalib()
    #    RGBCalib = ld.getRGBCalib()
    elif (dataset == 1):
        joint_data = ld.get_joint("joint/train_joint1")
        lidar_data = ld.get_lidar("lidar/train_lidar1")
    elif (dataset == 2):
        joint_data = ld.get_joint("joint/train_joint2")
        lidar_data = ld.get_lidar("lidar/train_lidar2")
    elif (dataset == 3):
        joint_data = ld.get_joint("joint/train_joint3")
        lidar_data = ld.get_lidar("lidar/train_lidar3")
    #    rgb_data = ld.get_rgb("cam/RGB_3")
    #    depth_data = ld.get_depth("cam/DEPTH_3")
    #    exIR_RGB = ld.getExtrinsics_IR_RGB()
    #    IRCalib = ld.getIRCalib()
    #    RGBCalib = ld.getRGBCalib()
    elif (dataset == 4):
        joint_data = ld.get_joint("joint/train_joint4")
        lidar_data = ld.get_lidar("lidar/train_lidar4")
    #    rgb_data = ld.get_rgb("cam/RGB_4")
    #    depth_data = ld.get_depth("cam/DEPTH_4")
    #    exIR_RGB = ld.getExtrinsics_IR_RGB()
    #    IRCalib = ld.getIRCalib()
    #    RGBCalib = ld.getRGBCalib()

    return joint_data, lidar_data
Example #2
File: main.py Project: e3u3/2DSLAM
def load_data(dataset, texture):
    '''
    Load and preprocess data
    Input:
        dataset - number of dataset to use
        texture - boolean to switch on texture mapping (include kinect data)
    Output:
        enc_data - Encoder data in dictionary, with left and right displacement, and timestamps
        imu_data - IMU data in dictionary, with angular velocity (yaw rate), linear acceleration and timestamps
        lidar_data - LiDAR data in dictionary, with ranges, angles and timestamps
    '''
    #load data
    imu_data = ld.get_imu(dataset)
    lidar_data = ld.get_lidar(dataset)
    enc_data = ld.get_enc(dataset)
    if texture:
        kinect_data = ld.get_kinect_time(dataset)
    else:
        kinect_data = 0

    # remove the yaw-rate bias (estimated from the first 380 samples) so the initial state is (0,0,0)
    yaw_bias_av = np.mean(imu_data['av'][0:380])
    imu_data['av'] -= yaw_bias_av

    # apply a band-pass filter to the yaw rate
    order = 1
    fs = 50  # sample rate, Hz
    low = 0.000001  # low cutoff frequency of the pass band, Hz
    high = 10  # high cutoff frequency of the pass band, Hz
    imu_data['av'] = butter_bandpass_filter(imu_data['av'], low, high, fs,
                                            order)

    return enc_data, imu_data, lidar_data, kinect_data
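
butter_bandpass_filter is not defined in this example. A minimal sketch of such a helper, assuming a standard SciPy Butterworth band-pass (the name, defaults, and structure here are assumptions, not the project's code):

from scipy.signal import butter, lfilter

def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):
    # normalize the cutoff frequencies by the Nyquist frequency
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    # apply the filter along the signal
    return lfilter(b, a, data)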
Example #3
    def __init__(self, lfilename, jfilename):
        self.lfilename = lfilename
        self.lidar = load_data.get_lidar(self.lfilename)

        self.jfilename = jfilename
        self.joint = load_data.get_joint(self.jfilename)
        self.j_t = self.joint['ts']
        self.j_ang = self.joint['head_angles']
        self.j_n = self.j_ang[0]
        self.j_h = self.j_ang[1]
        self.pose = np.array([0.0, 0.0, 0.0])
        self.scan = None
Example #4
def runSlam():
    # lidarFile = 'Proj4_2018_Train/data/train_lidar1'
    # jointFile = 'Proj4_2018_Train/data/train_joint1'
    lidarFile = 'Proj4_2018_test/test_lidar'
    jointFile = 'Proj4_2018_test/test_joint'

    plt.ion()
    # plt.show()

    lidar = ld.get_lidar(lidarFile)
    joint = ld.get_joint(jointFile)

    P, W, Map, poseHistoryX, poseHistoryY = slam(lidar, joint)
    showMaps(P, W, Map, poseHistoryX, poseHistoryY)
    print('Done')
    plt.show(block=True)
Example #5
def load_folder(folder_par):

    # prefix_list= ["Hokuyo","Encoders","imu"]
    # find location of period (where file extension starts) and extract unique filenames
    unique_files = list(
        set([i[i.find(".") - 2:i.find(".")] for i in os.listdir(folder_par)]))

    # load all map attributes into classes
    '''NOTE: the r prefix tells the interpreter that the backslash is to be taken literally, like any other character.'''
    map_list = [map_object(\
                            id,\
                            load_data.get_lidar(folder_par+r"/Hokuyo{0}".format(id)),\
                            load_data.get_encoder(folder_par+r"/Encoders{0}".format(id)),\
                            load_data.get_imu(folder_par+r"/imu{0}".format(id)))\
                            for id in unique_files]
    return map_list
Example #6
def runTextureMap():
    lidarFile = 'Proj4_2018_Train/data/train_lidar0'
    jointFile = 'Proj4_2018_Train/data/train_joint0'
    depthFile = 'Proj4_2018_Train_rgb/DEPTH_0'
    rgbFile = 'Proj4_2018_Train_rgb/RGB_0'

    plt.ion()

    lidar = ld.get_lidar(lidarFile)
    joint = ld.get_joint(jointFile)
    depth = ld.get_depth(depthFile)
    rgb = ld.get_rgb(rgbFile)

    slamWithTM(lidar, joint, depth, rgb)
    print('Done')
    plt.show(block=True)
Example #7
def main():
    # read data
    lidar_file_name = "lidar/train_lidar2"
    joint_file_name = "joint/train_joint2"
    lidar_data = get_lidar(lidar_file_name)
    joint_data = get_joint(joint_file_name)

    robot_pose = get_odom(lidar_data)
    lidar_data = joint_align(lidar_data, joint_data)

    # particles init
    P = particleInit(num=128)

    # grid map init
    MAP = mapInit()

    # init parameters
    step_size = 20
    trajectory = np.empty(shape=(1, 2))
    var_scale = np.array([0.001, 0.001, 0.01 * np.pi / 180])
    lidar_angles = np.arange(-135, 135.25, 0.25) * np.pi / 180

    for i in range(0, len(lidar_data), step_size):
        if (i % 100 == 0):
            print(i)
        ''' Predict '''
        delta_pose = get_motion(robot_pose, lidar_data[i], i, step_size)
        P['states'] = motion_model_predict(P['states'], delta_pose, var_scale)
        ''' Update '''
        best_particle = measurement_model_update(MAP, P, lidar_data[i],
                                                 lidar_angles)
        trajectory = np.vstack((trajectory, [
            int(best_particle[0] / MAP['res']) + MAP['sizex'] // 2,
            int(best_particle[1] / MAP['res']) + MAP['sizey'] // 2
        ]))
        ''' Mapping '''
        MAP['map'] = mapping(MAP['map'], lidar_data[i], best_particle,
                             MAP['res'], lidar_angles)
        ''' Resample '''
        N_eff = 1 / np.sum(P['weight']**2)
        if N_eff < 0.3 * P['number']:
            print("Resampling...")
            P = resampling_wheel(P)

    # plot
    plot(MAP['map'], MAP['res'], robot_pose, trajectory)
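
resampling_wheel is not shown above. A minimal systematic ("wheel") resampler sketch for the particle dictionary used in this example, assuming P carries 'states', 'weight', and 'number' (an assumption inferred from the calls above):

import numpy as np

def resampling_wheel(P):
    # draw N indices proportionally to weight, using one random spin offset
    N = P['number']
    positions = (np.arange(N) + np.random.uniform()) / N
    cumulative = np.cumsum(P['weight'])
    indexes = np.zeros(N, dtype=int)
    i = j = 0
    while i < N:
        if positions[i] < cumulative[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1
    P['states'] = P['states'][indexes]
    P['weight'] = np.ones(N) / N  # reset to uniform weights after resampling
    return P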
Example #8
	def __init__(self, dataset='0', data_folder='data', name=None):
		if name is None:
			lidar_file = os.path.join(data_folder, 'train_lidar' + dataset)
		else:
			lidar_file = os.path.join(data_folder, name)
		lidar_data = ld.get_lidar(lidar_file)
		# subtract the initial yaw and position so the trajectory starts at the origin
		# lidar_data[:]['rpy'][0]
		yaw_offset = lidar_data[0]['rpy'][0, 2]
		position_offset = lidar_data[0]['pose'][0]

		for j in range(len(lidar_data)):
			lidar_data[j]['rpy'][0, 2] -= yaw_offset
			lidar_data[j]['pose'][0] -= position_offset
		#self.range_theta_ = (np.arange(0,270.25,0.25) - 135)*np.pi/float(180)
		self.num_measures_ = len(lidar_data)
		self.data_ = lidar_data
Example #9
    def __init__(self, dataset='0', data_folder='data', name=None):
        if name is None:
            lidar_file = os.path.join(data_folder, 'train_lidar' + dataset)
        else:
            lidar_file = os.path.join(data_folder, name)
        lidar_data = ld.get_lidar(lidar_file)
        # subtract the beginning value of yaw given by rpy
        # lidar_data[:]['rpy'][0]
        yaw_offset = lidar_data[0]['rpy'][0, 2]
        for j in range(len(lidar_data)):
            lidar_data[j]['rpy'][0, 2] -= yaw_offset
        self.range_theta_ = np.arange(0, 270.25, 0.25) * np.pi / float(180)
        self.num_measures_ = len(lidar_data)
        self.data_ = lidar_data
        # self._read_lidar(lidar_data)
        # limits of the lidar's ray length
        self.L_MIN = 0.001
        self.L_MAX = 30
        self.res_ = 0.25  # angular resolution of the lidar, 0.25 degrees
Example #10
    def __init__(self, dataset='0', data_folder='data', name=None):

        if name is None:
            lidar_file = os.path.join(data_folder, 'train_lidar' + dataset)
        else:
            lidar_file = os.path.join(data_folder, name)

        lidar_data = ld.get_lidar(lidar_file)

        yaw_offset = lidar_data[0]['rpy'][0, 2]
        for j in range(len(lidar_data)):
            lidar_data[j]['rpy'][0, 2] -= yaw_offset

        self.range_theta = np.arange(0, 270.25, 0.25) * np.pi / float(180)
        self.num_measures = len(lidar_data)
        self.data_ = lidar_data

        self.L_MIN = 0.001
        self.L_MAX = 30
        self.res_ = 0.25
Example #11
import mapping_functions_17 as map_func

def find_nearest_time(lidar_ts, joint_ts):
    joint_idx = np.zeros(np.shape(lidar_ts))
    for i in range(lidar_ts.shape[1]):
        joint_idx[:, i] = np.abs(joint_ts - lidar_ts[:, i]).argmin()
    return joint_idx

ti = time.time()
#%% Load data
lidar_data_index = 0

lidar_data = io.loadmat("data/train_lidar"+str(lidar_data_index)+".mat")
joint_data = io.loadmat("data/train_joint"+str(lidar_data_index)+".mat")
joint_ts = joint_data['ts']
temp = ld.get_lidar('data/train_lidar'+str(lidar_data_index))
head_angles = joint_data['head_angles']

#%% Sync time
lidar_ts = []
for i in range(len(temp)):
    ts = float(temp[i]['t'])
    lidar_ts.append(ts)

lidar_ts = np.matrix(lidar_ts)

joint_idx = find_nearest_time(lidar_ts, joint_ts).astype(int)

#%% Init MAP
MAP, prob_map = map_func.init_map()
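
map_func.init_map is not reproduced here. Judging from the MAP dictionaries built in the other examples on this page, a minimal sketch (field names and defaults are assumptions) could look like:

import numpy as np

def init_map(res=0.05, xmin=-30, ymin=-30, xmax=30, ymax=30):
    MAP = {'res': res, 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
    MAP['sizex'] = int(np.ceil((xmax - xmin) / res + 1))  # cells
    MAP['sizey'] = int(np.ceil((ymax - ymin) / res + 1))
    MAP['map'] = np.zeros((MAP['sizex'], MAP['sizey']))  # log-odds grid
    prob_map = 0.5 * np.ones((MAP['sizex'], MAP['sizey']))  # cell occupancy probabilities
    return MAP, prob_map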
Example #12
def TM(data_folder_name, datanum, rgbd_flag):
	if test:
		name0 = 'test'
	else:
		name0 = 'train'
	if not if_multiple_data:
		datanum = ''

	'''
	# Load kinect parameters
	fex = open('./cameraParam/exParams.pkl', 'rb')
	fIR = open('./cameraParam/IRcam_Calib_result.pkl', 'rb')
	fRGB = open('./cameraParam/RGBcamera_Calib_result.pkl', 'rb')
	exParams = pickle.load(fex, encoding = 'bytes')
	irParams = pickle.load(fIR, encoding = 'bytes')
	rgbParams = pickle.load(fRGB, encoding = 'bytes')
	# get camera intrinsic parameters
	fx_ir = irParams[b'fc'][0]
	fy_ir = irParams[b'fc'][1]
	px_ir = irParams[b'cc'][0]
	py_ir = irParams[b'cc'][1]
	fx_rgb = rgbParams[b'fc'][0]
	fy_rgb = rgbParams[b'fc'][1]
	px_rgb = rgbParams[b'cc'][0]
	py_rgb = rgbParams[b'cc'][1]
	'''

	fx_ir, fy_ir = 364.457362486, 364.542810627
	px_ir, py_ir = 258.422487562, 202.48713994
	fx_rgb, fy_rgb = 1049.3317526, 1051.31847629
	px_rgb, py_rgb = 956.910516428, 533.452032441
	# get transformation from depth/IR camera to RGB camera
	# T_alignCams = alignCams(exParams[b'R'], exParams[b'T'])
	T_alignCams = np.array([[0.99996855, 0.00589981, 0.00529992, 52.2682/1000],
							[-0.00589406, 0.99998202, -0.00109998, 1.5192/1000],
							[-0.00530632, 0.00106871, 0.99998535, -0.6059/1000],
							[0., 0., 0., 1.]])

	# load data
	lidar_new = ld.get_lidar(os.path.join(data_folder_name, name0 + '_lidar' + str(datanum)))
	joint_new = ld.get_joint(os.path.join(data_folder_name, name0 + '_joint' + str(datanum)))

	dpt_name = os.path.join(rgbd_folder_name, 'DEPTH' + (if_multiple_data * ('_' + str(datanum))))
	dpt_mat = ld.get_depth(dpt_name)

	image_name = os.path.join(rgbd_folder_name, 'RGB' + (if_multiple_data * ('_' + str(datanum))) + ( \
		if_multiple_mat * ('_' + str(1))))
	rgb = ld.get_rgb(image_name)

	# starting time and intervals for data scan
	mints = 0
	int_skip = 1

	#prev_img_ind = 0
	h = 424  # depth image height & width
	w = 512
	xr = np.arange(w)
	yr = np.arange(h)
	u_dpt, v_dpt = np.meshgrid(xr, yr)
	# v=rows in mat/y-axis
	# u=cols in mat/x-axis

	# threshold of plane fitting
	eps = 0.02

	# init video for texture map
	map = np.load('map' + str(datanum) + '.npy')
	texture_videoname = 'goodtexture' + str(datanum) + '.mp4'
	video_t = cv2.VideoWriter(texture_videoname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \
							15, (np.shape(map)[1], np.shape(map)[0]))
	cv2.namedWindow(texture_videoname, cv2.WINDOW_NORMAL)

	img_videoname = 'grounddetection' + str(datanum) + '.mp4'
	video_img=cv2.VideoWriter(img_videoname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \
							15, (1920, 1080))
	cv2.namedWindow('im_to_show', cv2.WINDOW_NORMAL)

	display_floor = np.tile((map > 0)[:, :, np.newaxis], (1, 1, 3)) * 125
	display_floor = display_floor.astype('uint8')

	#path_rx = np.load('path_rx'+str(datanum)+'.npy')
	#path_ry = np.load('path_ry'+str(datanum)+'.npy')
	path_x = np.load('path_x'+str(datanum)+'.npy')
	path_y = np.load('path_y'+str(datanum)+'.npy')
	path_theta=np.load('path_theta'+str(datanum)+'.npy')
	path_ts=np.load('path_ts'+str(datanum)+'.npy')

	# cell to meter
	path_x=(path_x+1)*0.1-50
	path_y = (path_y + 1) * 0.1 - 50

	# SLAM
	for i in np.arange(mints, len(dpt_mat), int_skip):
		i=int(i)
		print('\ndepth image ' + str(i))

		# check whether to get frames from different RGB mat
		samergb=(i+1)%300
		this_rgb=int((i+1)/300)
		if not samergb:
			if this_rgb ==rgbnum:
				print('\n...texture mapping end!')
				video_t.release()
				video_img.release()
				break
			print('\nTime to go with another RGB mat...')
			rgb=ld.get_rgb(os.path.join(rgbd_folder_name, 'RGB' + (if_multiple_data * ('_' + str(datanum))) + ( \
				if_multiple_mat * ('_' + str(this_rgb+1)))))

		# sync time stamps
		ts_diff = abs(joint_new['ts'] - dpt_mat[i]['t'])[0, :]
		ind_j = np.where(ts_diff == min(ts_diff))[0][0]
		ts_diff = abs(path_ts- dpt_mat[i]['t'])
		ind_p = np.where(ts_diff == min(ts_diff))[0]
		#pdb.set_trace()

		# load image
		#pdb.set_trace()
		image=rgb[i - 300*(this_rgb)]['image']
		dpt=dpt_mat[i]['depth']
		dpt = dpt / 1000  # depth in meter now
		head_angles = joint_new['head_angles'][:, ind_j]
		rpy = joint_new['rpy'][:, ind_j] - joint_new['rpy'][:, 0]

		# 3D in dpt frame
		Pts_dpt = img_to_world(fx_ir, fy_ir, px_ir, py_ir, u_dpt, v_dpt, dpt[v_dpt, u_dpt])
		# 3D in cam frame
		valid_homo = np.hstack((Pts_dpt, np.ones([np.shape(Pts_dpt)[0], 1])))# n*4
		Pts_rgb = np.dot(T_alignCams, valid_homo.T)  # 4*n
		# 2D in cam frame
		u_rgb, v_rgb = world_to_img(fx_rgb, fy_rgb, px_rgb, py_rgb, Pts_rgb.T)
		u_rgb = u_rgb.astype(int)
		v_rgb = v_rgb.astype(int)
		# get valid indices
		ind_max = np.logical_and(u_rgb < 1920, v_rgb < 1080)
		ind_min = np.logical_and(u_rgb >= 0, v_rgb >= 0)
		ind_range=np.logical_and(ind_max, ind_min)

		# 3D in world frame
		T=kinectToGlobal(head_angles, rpy, \
						 np.array([path_x[ind_p], path_y[ind_p], path_theta[ind_p]]))
		#pdb.set_trace()
		R = np.array([[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0,0,0,1]])
		Pts_w=np.dot(T, np.dot(R,Pts_rgb)) #4*n
		#pdb.set_trace()

		# get points close to ground plane
		mask_g=(Pts_w[2,:]<eps)
		ind_valid=np.logical_and(ind_range, mask_g)
		true_pos=Pts_w[:2, ind_valid]

		# pos in grid cells
		x_true_pos= np.ceil((true_pos[0,:] - (-50)) / 0.1).astype(np.int16) - 1
		y_true_pos = np.ceil((true_pos[1,:] - (-50)) / 0.1).astype(np.int16) - 1
		x_true_pos = x_true_pos.astype(int)
		y_true_pos = y_true_pos.astype(int)

		# visualization
		img_base = np.zeros_like(image)
		img_base[v_rgb[ind_valid], u_rgb[ind_valid]] += 1
		img_base = cv2.dilate(img_base, kernel = np.ones((5,5),np.uint8), iterations = 1)
		gmask = img_base[:, :, 2].copy()
		g = np.stack((np.zeros_like(gmask), np.zeros_like(gmask), gmask), 2)
		im_to_show = image.copy()+g*255*0.3
		cv2.imshow('im_to_show', im_to_show.astype(np.uint8))
		key = cv2.waitKey(100)
		video_img.write(im_to_show.astype(np.uint8))

		# adding texture
		display_floor[x_true_pos, y_true_pos, 2] = image[v_rgb[ind_valid], u_rgb[ind_valid], 0]
		display_floor[x_true_pos, y_true_pos, 1] = image[v_rgb[ind_valid], u_rgb[ind_valid], 1]
		display_floor[x_true_pos, y_true_pos, 0] = image[v_rgb[ind_valid], u_rgb[ind_valid], 2]
		cv2.imshow(texture_videoname, display_floor)
		key = cv2.waitKey(100)
		video_t.write(display_floor)
Example #13
    map['occu_grids'][
        matrix > 0] = map['occu_grids'][matrix > 0] - log_odds_free_update

    # saturation limit of log odds
    map['occu_grids'] = saturationCheck(map['occu_grids'])

    return map
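
saturationCheck is not shown in this snippet; given the saturation limits defined in the __main__ block below, a plausible one-liner (an assumption, not the project's code) is:

def saturationCheck(occu_grids):
    # clamp log odds so no cell can grow unboundedly confident
    return np.clip(occu_grids, log_odds_free_sat, log_odds_occupied_sat)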


if __name__ == '__main__':
    if test:
        name0 = 'test'
    else:
        name0 = 'train'

    lidar_new = ld.get_lidar(
        os.path.join(data_folder_name, name0 + '_lidar' + str(datanum)))
    joint_new = ld.get_joint(
        os.path.join(data_folder_name, name0 + '_joint' + str(datanum)))

    log_odds_free_update = 0.75
    log_odds_occupied_update = 1.5
    log_odds_free_sat = -100
    log_odds_occupied_sat = 100

    # init map
    map = {}
    map['res'] = 0.1  # meters
    map['xmin'] = -30  # meters
    map['ymin'] = -30
    map['xmax'] = 30
    map['ymax'] = 30
Example #14
# In[10]:

#load data
import matplotlib.pyplot as plt
import load_data as ld
import math
import numpy as np
import MapUtils as MU
from scipy.special import logsumexp

acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z, imu_ts = ld.get_imu('data/imu23')

FR, FL, RR, RL, enc_ts = ld.get_encoder('data/Encoders23')

lidar = ld.get_lidar('data/Hokuyo23')

# In[11]:


# Bresenham's ray tracing algorithm in 2D.
# Inputs:
#(sx, sy) start point of ray
#(ex, ey) end point of ray
def bresenham2D(sx, sy, ex, ey):
    sx = int(round(sx))
    sy = int(round(sy))
    ex = int(round(ex))
    ey = int(round(ey))
    dx = abs(ex - sx)
    dy = abs(ey - sy)
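    # (the example is truncated here; the lines below sketch the standard
    #  continuation of integer Bresenham, vectorized with NumPy: an
    #  assumption about the missing code, not the project's own lines)
    steep = abs(dy) > abs(dx)
    if steep:
        dx, dy = dy, dx  # iterate along the dominant axis
    if dy == 0:
        q = np.zeros((dx + 1, 1))
    else:
        # accumulate the error term: q[i] == 1 where the minor axis steps
        q = np.append(0, np.greater_equal(
            np.diff(np.mod(np.arange(np.floor(dx / 2),
                                     -dy * dx + np.floor(dx / 2) - 1,
                                     -dy), dx)), 0))
    if steep:
        y = np.arange(sy, ey + 1) if sy <= ey else np.arange(sy, ey - 1, -1)
        x = sx + np.cumsum(q) if sx <= ex else sx - np.cumsum(q)
    else:
        x = np.arange(sx, ex + 1) if sx <= ex else np.arange(sx, ex - 1, -1)
        y = sy + np.cumsum(q) if sy <= ey else sy - np.cumsum(q)
    return np.vstack((x, y))  # all grid cells touched by the ray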
Example #15
File: SLAM.py Project: hal2001/SLAM
def SLAM(data_folder_name, datanum):
    if test:
        name0 = 'test'
    else:
        name0 = 'train'
    if not if_multiple_data:
        datanum = ''

    #load data
    lidar_new = ld.get_lidar(
        os.path.join(data_folder_name, name0 + '_lidar' + str(datanum)))
    joint_new = ld.get_joint(
        os.path.join(data_folder_name, name0 + '_joint' + str(datanum)))

    # init map
    map = {}
    map = initMap(map)

    # init particles
    pr = {}
    pr = initParticles(pr)

    # starting time and intervals for data scan
    mints = 0
    int_skip = 10

    # init video
    videoname = 'slam' + str(datanum) + '.mp4'
    video = cv2.VideoWriter(videoname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \
          15, (map['sizey'], map['sizex']))
    cv2.namedWindow(videoname, cv2.WINDOW_NORMAL)

    # SLAM
    for i in np.arange(mints, len(lidar_new), int_skip):
        print('\nscan ' + str(i))

        # sync time stamps
        ts_diff = abs(joint_new['ts'] - lidar_new[i]['t'])[0, :]
        ind_j = np.where(ts_diff == min(ts_diff))[0][0]
        #print('lidar'+str(i)+str(lidar_new[i]['t']))
        #print('joint'+str(ind_j)+ str(joint_new['ts'][:, ind_j]))

        # first scan to update map only
        if i == mints:
            ind_j_first = ind_j
            pr['best'] = lidar_new[i]['pose']
            pr['best'] = pr['best'].astype('float')
            pr['odom'] = np.tile(pr['best'], [pr['num'], 1])
            ray_g = raycast(joint_new, lidar_new, map, i, ind_j, ind_j_first,
                            pr['best'])
            map = updateMap(map, pr['best'], ray_g)

        # standard deviation for gaussian noise in motion update
        binary_map = (map['occu_grids'] > 0).astype('uint8')
        delta_pose = lidar_new[i]['pose'] - lidar_new[i - int_skip]['pose']
        stdx = max(0.001 * int_skip, abs(delta_pose[0, 0]) / 3)
        stdy = max(0.001 * int_skip, abs(delta_pose[0, 1]) / 3)
        stdtheta = max(0.005 * int_skip, abs(delta_pose[0, 2]) / 3)

        # for each particle
        for p in range(pr['num']):
            # particle propagation
            motion_noise = np.random.normal(0, [[stdx], [stdy], [stdtheta]],
                                            (3, 1))
            pr['odom'][p, :] = motionUpdate(lidar_new[i]['pose'],
                                            lidar_new[i - int_skip]['pose'],
                                            pr['odom'][p, :], motion_noise)

            # use map correlation to update particle weights
            ray_g = raycast(joint_new, lidar_new, map, i, ind_j, ind_j_first,
                            pr['odom'][p, :][np.newaxis, :])  #3*n
            ray_cell_x, ray_cell_y = mToCell(ray_g[0, :], ray_g[1, :], map)
            corrsum = compCorr(binary_map, map, ray_cell_x, ray_cell_y)
            pr['weights'][p, :] = pr['weights'][p, :] * corrsum

        # find best particle with maximum weights
        ind_best = np.where(pr['weights'] == max(pr['weights']))
        ind_best = ind_best[0][0]
        pr['best'] = pr['odom'][ind_best][np.newaxis, :]

        # weights normalization
        pr['weights'] = pr['weights'] / sum(pr['weights'])

        # update map with best particle only
        ray_g = raycast(joint_new, lidar_new, map, i, ind_j, ind_j_first,
                        pr['best'])
        map = updateMap(map, pr['best'], ray_g)

        #print('...num of particles with max weights: '+str(np.where(pr['weights'] == max(pr['weights']))[0].size))
        print('...max weights: ' + str(max(pr['weights'])))
        print('...best pose: ' + str(pr['best']))

        # resample particles when effective particles is less than threshold
        Neff = sum(pr['weights'])**2 / sum(pr['weights']**2)
        print('...effective particle number: ' + str(Neff))
        if Neff < pr['Neff']:
            pr = resample(pr)
            print('\nresampling done!')

        # meter to cell
        rob_cell_x, rob_cell_y = mToCell(pr['best'][:, 0], pr['best'][:, 1],
                                         map)

        # visualization
        # occupancy grids
        display = (map['occu_grids']).copy()
        displayrgb = np.tile(display[:, :, np.newaxis], (1, 1, 3))
        displayrgb = (displayrgb + 100) / 200 * 255
        displayrgb = displayrgb.astype('uint8')
        # texts
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (20, 20)
        fontScale = 0.4
        fontColor = (255, 255, 255)
        cv2.putText(displayrgb,
                    'Time passed: ' + str(
                        (lidar_new[i]['t'][0, :] - lidar_new[0]['t'][0, :])) +
                    '   Robot pose: ' + str(pr['best'][0, :]),
                    bottomLeftCornerOfText,
                    font,
                    fontScale,
                    fontColor,
                    thickness=1)
        # ray ends of each scan
        #displayrgb[ray_cell_x, ray_cell_y, 0] = 255
        #displayrgb[ray_cell_x, ray_cell_y, 1]= 0
        #displayrgb[ray_cell_x, ray_cell_y, 2] = 0

        # all particles
        '''
		for p in range(pr['num']):
			# meter to cell
			cell_x, cell_y = mToCell(pr['odom'][p, 0], pr['odom'][p, 1], map)
			cv2.circle(displayrgb, (cell_y, cell_x), 1, (255, 0, 255), -11)
		'''

        # raw odometry
        arrowlen = 7
        raw_cell_x, raw_cell_y = mToCell(lidar_new[i]['pose'][:, 0],
                                         lidar_new[i]['pose'][:, 1], map)
        cv2.circle(displayrgb, (raw_cell_y, raw_cell_x), 5, (255, 10, 0), -11)
        end_x_raw = raw_cell_x + arrowlen * cos(lidar_new[i]['pose'][:, 2])
        end_y_raw = raw_cell_y + arrowlen * sin(lidar_new[i]['pose'][:, 2])
        cv2.arrowedLine(displayrgb, (raw_cell_y, raw_cell_x), \
            (int(end_y_raw), int(end_x_raw)), (255, 10, 0), 5)

        # optimized odometry (best particle)
        cv2.circle(displayrgb, (rob_cell_y, rob_cell_x), 5, (0, 10, 255), -11)
        end_x = rob_cell_x + arrowlen * cos(pr['best'][:, 2])
        end_y = rob_cell_y + arrowlen * sin(pr['best'][:, 2])
        cv2.arrowedLine(displayrgb, (rob_cell_y, rob_cell_x), \
            (int(end_y), int(end_x)), (0, 10, 255), 5)

        #display = cv2.applyColorMap(display, cv2.COLORMAP_BONE)
        cv2.imshow(videoname, displayrgb)
        key = cv2.waitKey(100)
        video.write(displayrgb)

    video.release()
    cv2.destroyAllWindows()
Example #16
def main():
    # Load lidar and joint data
    lidar_dat = ld.get_lidar('data/train_lidar3')
    joint_dat = ld.get_joint('data/train_joint3')

    # Match the joint index to lidar data
    joint_ind = []
    lidar_t = lidar_dat[0]['t']
    for i in range(len(lidar_dat)):
        joint_ind.append(np.argmin(np.abs(lidar_dat[i]['t'] - joint_dat['ts'])))
        lidar_t = np.vstack((lidar_t, lidar_dat[i]['t']))

    np.save('time_lidar.npy', lidar_t) # save time line of lidar

    # Pick neck and head angles at only that timestep
    neck = joint_dat['head_angles'][0, joint_ind]
    head = joint_dat['head_angles'][1, joint_ind]
    t = joint_dat['ts'][:, joint_ind]

    # Initialize
    pos0 = np.array([0, 0, 0])
    q0 = np.array([1, 0, 0, 0])
    rob = robot(pos0, q0)
    t_start = 0

    pos_arr = np.zeros((1, 2))
    q_arr = np.array([1, 0, 0, 0])
    T_arr = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

    for i in range(t_start, t.shape[1] - 1):

        # Find the current pose
        T_cur = h.qp2homo(rob.q, rob.pos)

        # Odometry to get position action
        # Get rot_act from rpy
        pos_next = lidar_dat[i + 1]['pose'][0]
        pos_next = np.array([pos_next[0], pos_next[1], 0])

        rpy_next = lidar_dat[i + 1]['rpy'][0]
        q_next = h.rpy2quat(rpy_next[0], rpy_next[1], rpy_next[2])
        T_next = h.qp2homo(q_next, pos_next)

        # Get the control action
        T_act = np.dot(np.linalg.inv(T_cur), T_next)
        q_act, pos_act = h.homo2qp(T_act)
        # motion model
        rob.predict(q_act, pos_act)
        # update pos and quaternion array
        pos_arr = np.vstack((pos_arr, np.array([rob.pos[0], rob.pos[1]])))
        q_arr = np.vstack((q_arr, np.array(rob.q)))
        # update homogeneous transformation array
        T_cur = np.linalg.inv(h.Tcam2w(rob, head[i], neck[i]))
        T_arr = np.vstack((T_arr, T_cur))

    # store data
    np.save('pos_pred.npy', pos_arr)
    np.save('q_pred.npy', q_arr)
    np.save('T_pred.npy', T_arr)
    # show the predicted trajectory
    fig1 = plt.figure(1)
    plt.plot(pos_arr[:, 0], pos_arr[:, 1])
    plt.show()
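
The helpers in h (e.g. h.rpy2quat) are not included in this listing. For reference, a minimal roll-pitch-yaw to quaternion sketch under the usual ZYX (yaw-pitch-roll) convention, returning [w, x, y, z]; this is a generic formula, not necessarily the project's implementation:

import numpy as np

def rpy2quat(roll, pitch, yaw):
    # half-angle sines and cosines of each Euler angle
    cr, sr = np.cos(roll / 2), np.sin(roll / 2)
    cp, sp = np.cos(pitch / 2), np.sin(pitch / 2)
    cy, sy = np.cos(yaw / 2), np.sin(yaw / 2)
    return np.array([cr * cp * cy + sr * sp * sy,   # w
                     sr * cp * cy - cr * sp * sy,   # x
                     cr * sp * cy + sr * cp * sy,   # y
                     cr * cp * sy - sr * sp * cy])  # z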
Example #17
            while resampleid[0, ip] > cumlatedWeight[0, ind]:
                ind += 1

            # use this index
            inds.append(ind)

        particle = particle[:, inds]

        # init weight for resampled particles
        weight = np.zeros((1, Np)) + 1.0 / Np

    return particle, weight


if __name__ == "__main__":
    l0 = ld.get_lidar('lidar/train_lidar4')
    l0_len = len(l0)
    U = mapping.get_body_pose_inWorld(l0)

    # use one scan out of every `segment` scans
    segment = 15
    j0 = ld.get_joint('joint/train_joint4')

    # 25 particles
    Np = 25

    #init particles, weights, map, control input, pose and motion trjectory
    particle = np.zeros((3, Np))
    weight = np.ones((1, Np)) / Np
    px = U[:, 0:1]
    px_h = px
Example #18
    body2world_pose = np.hstack((body2world_rot,np.array([lidar_pose[0],lidar_pose[1],0.93]).reshape(3,1)))
    body2world_pose = np.vstack((body2world_pose,np.array([0,0,0,1]).reshape(1,4)))
      

    body_pose = head2body_pose @ lid2head_pose @ lidar_scan
    
    ###### Filtering Out Ground Points ######
    body_pose_T = body_pose.T
    body_pose_corrected = body_pose_T[np.where(body_pose[2,:] > 0)]
    body_pose_corrected = body_pose_corrected.T
    
    return body2world_pose @ body_pose_corrected 




if __name__ == '__main__':
    lidar_data = get_lidar("lidar/train_lidar0")
    #Pose is already in world frame and scan has to be shifted to world frame
    print("Read Lidar Data")

    MAP = init_map()
    print("Map intialized")
    #lidar_data = sorted(lidar_data.items(),key = lambda k:  k['t'][0])
    #print("Sorted Lidar Data")
    odometry_data = get_joint("joint/train_joint0")
    print("Loaded Odometry data")

    slam(lidar_data,odometry_data,MAP)

Example #19
def createMap(fileNumber):
    lidarFilename = 'data/train_lidar'+fileNumber
    jointFileName = 'data/train_joint'+fileNumber
    lidarData = ld.get_lidar(lidarFilename)
    jointData = ld.get_joint(jointFileName)

    #find all the joint times
    allJointTimes = jointData['ts'][0]

    #find all the head angles from joint data
    headAngles = jointData['head_angles']

    #get the total number of timestamps
    numTimeStamps = len(lidarData)

    # init MAP
    MAP = {}
    MAP['res'] = 0.05  # meters
    MAP['xmin'] = -20  # meters
    MAP['ymin'] = -20
    MAP['xmax'] = 20
    MAP['ymax'] = 20
    MAP['sizex'] = int(np.ceil((MAP['xmax'] - MAP['xmin']) / MAP['res'] + 1))  # cells
    MAP['sizey'] = int(np.ceil((MAP['ymax'] - MAP['ymin']) / MAP['res'] + 1))
    MAP['map'] = np.zeros((MAP['sizex'], MAP['sizey']), dtype=np.double)  # DATA TYPE: char or int8

    deadReckoningPoses = deadReckoning(fileNumber)

    for i in range(0,numTimeStamps,100):
        #load the data for this timestamp
        dataI = lidarData[i]

        #find the thetas
        theta = np.array([np.arange(-135,135.25,0.25)*np.pi/180.])

        #get the scan at this point
        di = np.array(dataI['scan'])

        # take valid indices
        # indValid = np.logical_and((di < 30), (di > 0.1))
        # theta = theta[indValid]
        # di = di[indValid]

        #find the position of the head at this time
        rHead = np.vstack([di * np.cos(theta), di * np.sin(theta), np.zeros((1,1081)),np.ones((1,1081))])

        #find the time stamp
        ts = dataI['t'][0][0]

        #find the closest joint based on the absolute time
        idx = findIndexOfCloestTimeFrame(allJointTimes,ts)

        #find the head angles of the closest times
        neckAngle = headAngles[0][idx]
        headAngle = headAngles[1][idx]

        #find the position of the body at this time
        dNeck = .15
        rotNeck = rotzHomo(neckAngle,0,0,dNeck)
        rotHead = rotyHomo(headAngle)
        rBody = np.dot(np.dot(rotNeck,rotHead),rHead)

        #get the body roll and pitch angles
        rollBody = jointData['rpy'][0][i]
        pitchBody = jointData['rpy'][1][i]

        rotBodyRoll = rotxHomo(rollBody,0,0,0)
        rotBodyPitch = rotyHomo(pitchBody)

        #apply the body roll and pitch angles
        rBody = np.dot(rotBodyPitch,np.dot(rotBodyRoll,rBody))

        #get the dead reckoned poses
        xPose = deadReckoningPoses[i][0]
        yPose = deadReckoningPoses[i][1]
        thetaPose = deadReckoningPoses[i][2]

        #find the yaw and pitch angle of the lidar
        rpy = np.array(dataI['rpy']).T
        yawAngle = rpy[2]

        #convert from the body frame to the global frame
        dBody = .93 + .33
        rotGlobal = rotzHomo(yawAngle,xPose,yPose,dBody)
        rGlobal = np.dot(rotGlobal,rBody)

        #remove the data of the lidar hitting the ground
        rGlobal = rGlobal[:,(rGlobal[2,:] >= 1E-4)]

        #convert to cells
        xs0 = rGlobal[0,:]
        ys0 = rGlobal[1,:]
        xis = np.ceil((xs0 - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yis = np.ceil((ys0 - MAP['ymin']) / MAP['res']).astype(np.int16) - 1

        xPose = np.ceil((xPose - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yPose = np.ceil((yPose - MAP['ymin']) / MAP['res']).astype(np.int16) - 1

        #run getMapCellsFromRay to get points that are unoccupied
        cells = getMapCellsFromRay(xPose,yPose,xis,yis)
        xsFree = np.int_(cells[0,:])
        ysFree = np.int_(cells[1,:])

        #find all the occupied cells
        xsOcc = xis
        ysOcc = yis

        #decrease the log odds of cells traced as free
        indGood = np.logical_and(np.logical_and(np.logical_and((xsFree > 1), (ysFree > 1)), (xsFree < MAP['sizex'])), (ysFree < MAP['sizey']))
        logOddsStepDecrease = .008
        MAP['map'][xsFree[indGood], ysFree[indGood]] = MAP['map'][xsFree[indGood], ysFree[indGood]] - logOddsStepDecrease

        #increase the log odds of occupied cells
        indGoodOcc = np.logical_and(np.logical_and(np.logical_and((xsOcc > 1), (ysOcc > 1)), (xsOcc < MAP['sizex'])), (ysOcc < MAP['sizey']))
        logOddsStepIncrease = .05
        MAP['map'][xsOcc[indGoodOcc], ysOcc[indGoodOcc]] = MAP['map'][xsOcc[indGoodOcc], ysOcc[indGoodOcc]] + logOddsStepIncrease

    #show the map
    plt.imshow(MAP['map'], cmap="hot");
    posesX = np.ceil((deadReckoningPoses[:,0] - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
    posesY = np.ceil((deadReckoningPoses[:,1] - MAP['ymin']) / MAP['res']).astype(np.int16) - 1

    #show the dead reckoned poses
    plt.scatter(posesX,posesY)
    plt.show()
Example #20
# specify dataset suffix, set to str() if dataset has no numeric suffix
dataset = str(1)

# specify prefix of folder to read file from. E.g. set to "train" for "trainset" folder
folder_prefix = "train"

####################################################################################################
#                                           LOAD DATA                                              #
####################################################################################################

# load lidar and joint data
folder = folder_prefix + "set"
print "\nLoading lidar data..."
lidar_filename = folder + "/lidar/" + folder_prefix + "_lidar" + len(
    dataset) * dataset
lidars = load.get_lidar(lidar_filename)
print "Done!\n"
print "Loading joint data..."
joint_filename = folder + "/joint/" + folder_prefix + "_joint" + len(
    dataset) * dataset
joint = load.get_joint(joint_filename)
print "Done!\n"

# load RGB and DEPTH data
print "Attempting to load RGB and DEPTH data..."
rgb_exists = False
RGBs = []
depths = []
try:
    load_depth = True
    RGB_folder = folder + "/cam"
Example #21
# Jinwook Huh

import load_data as ld
import p3_util as ut

acc_x, acc_y, acc_z, gyro_x, gyro_y, gyro_z, imu_ts = ld.get_imu('imuRaw20')

FR, FL, RR, RL, enc_ts = ld.get_encoder('Encoders20')

lidar = ld.get_lidar('Hokuyo20')
ut.replay_lidar(lidar)
Example #22
def texture_mapping(MAP, w_T_b_best):

    IRCalib = getIRCalib()
    Matrix_IRCalib = np.array([[
        IRCalib['fc'][0], IRCalib['ac'] * IRCalib['fc'][0], IRCalib['cc'][0]
    ], [0, IRCalib['fc'][1], IRCalib['cc'][1]], [0, 0, 1]])
    RGBCalib = getRGBCalib()
    Matrix_RGBCalib = np.array([[
        RGBCalib['fc'][0], RGBCalib['ac'] * RGBCalib['fc'][0],
        RGBCalib['cc'][0]
    ], [0, RGBCalib['fc'][1], RGBCalib['cc'][1]], [0, 0, 1]])
    exIR_RGB = getExtrinsics_IR_RGB()

    l0 = get_lidar("lidar/train_lidar0")
    r0 = get_rgb("cam/RGB_0")
    d0 = get_depth("cam/DEPTH_0")

    lidar_t = []
    depth_t = []
    Tneck = np.zeros((4, 4))
    Thead = np.zeros((4, 4))

    for i in range(len(l0)):
        lidar_t1 = np.array(l0[i]['t'][0])
        lidar_t.append(lidar_t1)

    for j in range(len(d0)):
        depth_t1 = np.array(d0[j]['t'][0])
        depth_t.append(depth_t1)

    rgb_it = [r['image'] for r in r0]
    rgb_angle = [r['head_angles'] for r in r0]
    rgb_neck = rgb_angle[0]
    rgb_head = rgb_angle[1]

    depth_dt = [d['depth'] for d in d0]

    ############## align each depth timestamp with the closest lidar timestamp
    xxx = np.array((lidar_t))
    yyy = np.array((depth_t))

    xx = np.array(([l[0] for l in xxx]))
    yy = np.array(([l[0] for l in yyy]))
    idx1 = np.searchsorted(xx, yy, side="left").clip(max=xx.size - 1)
    mask = (idx1 > 0) & \
           ((idx1 == len(xx)) | (np.fabs(yy - xx[idx1 - 1]) < np.fabs(yy - xx[idx1])))
    ##### index1[k] is the index of the lidar timestamp closest to the k-th depth timestamp
    index1 = np.array((idx1 - mask)).tolist()

    #print(index)

    # transform from ir to rgb frame rgb T_ir
    rgb_T_ir = np.zeros((4, 4))
    rgb_T_ir[0:3, 0:3] = exIR_RGB['rgb_R_ir']
    rgb_T_ir[0, 3] = exIR_RGB['rgb_T_ir'][0]
    rgb_T_ir[1, 3] = exIR_RGB['rgb_T_ir'][1]
    rgb_T_ir[2, 3] = exIR_RGB['rgb_T_ir'][2]
    rgb_T_ir[3, 3] = 1

    # transform from rgb to body frame  b_T_rgb

    # use the idxs to track the
    Rn = euler2mat(0, 0, rgb_neck, axes='sxyz')
    Tneck[0:3, 0:3] = Rn
    Tneck[0, 3] = 0
    Tneck[1, 3] = 0
    Tneck[2, 3] = 0.07
    Tneck[3, 3] = 1
    Rh = euler2mat(0, rgb_head, 0, axes='sxyz')
    Thead[0:3, 0:3] = Rh  # transform from lidar to body frame
    Thead[0, 3] = 0
    Thead[1, 3] = 0
    Thead[2, 3] = 0.33
    Thead[3, 3] = 1
    b_T_rgb = np.dot(Thead, Tneck)

    # w_T_b is the current best particle
    w_T_c_rgb = np.dot(w_T_b_best, b_T_rgb)

    o_T_c = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0,
                                                                   1]])

    map_x = MAP['sizex']
    map_y = MAP['sizey']

    my_x = map_x.tolist()
    my_y = map_y.tolist()

    tmt = np.zeros((MAP['sizex'], MAP['sizey'], 3))

    #tmt[my_x, my_y, 0] =
    #tmt[my_x, my_y, 1] =
    #tmt[my_x, my_y, 2] =

    return tmt
Example #23
def SLAM(lidarFilepath, jointFilepath):
    #get the data
    lidarData = ld.get_lidar(lidarFilepath)
    jointData = ld.get_joint(jointFilepath)

    #find all the joint times
    allJointTimes = jointData['ts'][0]

    #find all the head angles from joint data
    headAngles = jointData['head_angles']

    #get the total number of timestamps
    numTimeStamps = len(lidarData)

    # init MAP
    MAP = {}
    MAP['res'] = 0.05  # meters
    MAP['xmin'] = -20  # meters
    MAP['ymin'] = -20
    MAP['xmax'] = 20
    MAP['ymax'] = 20
    MAP['sizex'] = int(np.ceil((MAP['xmax'] - MAP['xmin']) / MAP['res'] +
                               1))  # cells
    MAP['sizey'] = int(np.ceil((MAP['ymax'] - MAP['ymin']) / MAP['res'] + 1))
    MAP['map'] = np.zeros((MAP['sizex'], MAP['sizey']),
                          dtype=np.double)  # DATA TYPE: char or int8

    # x-positions of each pixel of the map
    x_im = np.arange(MAP['xmin'], MAP['xmax'] + MAP['res'], MAP['res'])
    # y-positions of each pixel of the map
    y_im = np.arange(MAP['ymin'], MAP['ymax'] + MAP['res'], MAP['res'])

    # find the thetas
    theta = np.array([np.arange(-135, 135.25, 0.25) * np.pi / 180.])

    # number of particles
    numParticles = 150

    # instantiate the particles
    particlePoses = np.zeros((numParticles, 3))
    weights = (1.0 / numParticles) * np.ones((numParticles, ))

    fig = plt.figure()
    im = plt.imshow(MAP['map'], cmap="hot", animated=True)

    #all predicted robot positions
    finalPoses = np.zeros((numTimeStamps, 4))

    stride = 300

    for i in range(0, numTimeStamps - stride, stride):
        #load the data for this timestamp
        dataI = lidarData[i]

        #get the scan at this point
        di = np.array(dataI['scan'])

        # take valid indices
        # indValid = np.logical_and((di < 30), (di > 0.1))
        # theta = theta[indValid]
        # di = di[indValid]

        #find the position of the head at this time
        rHead = np.vstack([
            di * np.cos(theta), di * np.sin(theta),
            np.zeros((1, 1081)),
            np.ones((1, 1081))
        ])

        #find the time stamp
        ts = dataI['t'][0][0]

        #find the closest joint based on the absolute time
        idx = findIndexOfCloestTimeFrame(allJointTimes, ts)

        #find the head angles of the closest times
        neckAngle = headAngles[0][idx]
        headAngle = headAngles[1][idx]

        #find the position of the body at this time
        dNeck = .15
        rotNeck = rotzHomo(neckAngle, 0, 0, dNeck)
        rotHead = rotyHomo(headAngle)
        rBody = np.dot(np.dot(rotNeck, rotHead), rHead)

        #get the body roll and pitch angles
        rollBody = jointData['rpy'][0][i]
        pitchBody = jointData['rpy'][1][i]

        rotBodyRoll = rotxHomo(rollBody, 0, 0, 0)
        rotBodyPitch = rotyHomo(pitchBody)

        #apply the body roll and pitch angles
        rBody = np.dot(rotBodyPitch, np.dot(rotBodyRoll, rBody))

        #get the pose of the best particle p*
        indexOfBest = np.argmax(weights)
        position = particlePoses[indexOfBest, :]

        #get the position of the best particles
        xPose = position[0]
        yPose = position[1]
        thetaPose = position[2]

        #store ts, x, y, theta for this timestep to plot the trajectory later
        finalPoses[i][0] = ts
        finalPoses[i][1] = xPose
        finalPoses[i][2] = yPose
        finalPoses[i][3] = thetaPose

        #find the yaw and pitch angle of IMU rpy
        rpy = np.array(dataI['rpy']).T
        yawAngle = rpy[2]

        #convert from the body frame to the global frame
        dBody = .93 + .33
        rotGlobal = rotzHomo(yawAngle, xPose, yPose, dBody)
        rGlobal = np.dot(rotGlobal, rBody)

        #remove the data of the lidar hitting the ground
        rGlobal = rGlobal[:, (rGlobal[2, :] >= 1E-4)]

        #convert to cells
        xs0 = rGlobal[0, :]
        ys0 = rGlobal[1, :]
        xis = np.ceil((xs0 - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yis = np.ceil((ys0 - MAP['ymin']) / MAP['res']).astype(np.int16) - 1

        xPose = np.ceil(
            (xPose - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
        yPose = np.ceil(
            (yPose - MAP['ymin']) / MAP['res']).astype(np.int16) - 1

        #run getMapCellsFromRay to get points that are unoccupied
        cells = getMapCellsFromRay(xPose, yPose, xis, yis)
        xsFree = np.int_(cells[0, :])
        ysFree = np.int_(cells[1, :])

        #find all the occupied cells
        xsOcc = xis
        ysOcc = yis

        #decrease the log odds of cells traced as free
        indGood = np.logical_and(
            np.logical_and(np.logical_and((xsFree > 1), (ysFree > 1)),
                           (xsFree < MAP['sizex'])), (ysFree < MAP['sizey']))
        logOddsStepDecrease = 1  #np.log(.8/.3)
        MAP['map'][xsFree[indGood], ysFree[indGood]] = MAP['map'][
            xsFree[indGood], ysFree[indGood]] - logOddsStepDecrease

        #increase the log odds of occupied cells
        indGoodOcc = np.logical_and(
            np.logical_and(np.logical_and((xsOcc > 1), (ysOcc > 1)),
                           (xsOcc < MAP['sizex'])), (ysOcc < MAP['sizey']))
        logOddsStepIncrease = 3  #np.log(.7/.2)#.05
        MAP['map'][xsOcc[indGoodOcc], ysOcc[indGoodOcc]] = MAP['map'][
            xsOcc[indGoodOcc], ysOcc[indGoodOcc]] + logOddsStepIncrease

        #do localization prediction
        # get the odometry for the i data
        odomCurr = np.array(dataI['pose']).T
        xOdom = odomCurr[0][0]
        yOdom = odomCurr[1][0]
        thetaOdom = odomCurr[2][0]
        currOdomPose = np.array([xOdom, yOdom]).T

        # get the odometry for the i+1 data
        odomNext = np.array(lidarData[i + stride]['pose']).T
        nextXOdom = odomNext[0][0]
        nextYOdom = odomNext[1][0]
        nextThetaOdom = odomNext[2][0]
        nextOdomPose = np.array([nextXOdom, nextYOdom]).T

        #do prediction step for particles to get particle positions at t+1
        particlePoses = deadReckoning(particlePoses, currOdomPose, thetaOdom,
                                      nextOdomPose, nextThetaOdom)

        #do localization update and get updated weights for particles
        for particleI in range(0, numParticles):
            #get particleI at time t+1
            particle = particlePoses[particleI, :]
            #get x and y positions of particle at time t+1
            particleX = particle[0]
            particleY = particle[1]

            #create a small search window around the particle position
            x_range = np.arange(particleX - 0.2, particleX + 0.2, 0.05)
            y_range = np.arange(particleY - 0.2, particleY + 0.2, 0.05)

            #get the correlation between window and the map
            corr = mapCorrelation(MAP['map'], x_im, y_im, rGlobal[0:3, :],
                                  x_range, y_range)
            maxCorr = np.max(corr[:])
            oldWeight = weights[particleI]

            #get the new weight
            newWeight = oldWeight * maxCorr

            #update new weight in weights
            weights[particleI] = newWeight

        #normalize the weights
        weights = weights / np.sum(weights)

        #resample the particles
        Neff = 1.0 / np.sum(weights**2)
        Nthresh = 20
        #reset the weights to uniform
        if Neff < Nthresh:
            weights = (1.0 / numParticles) * np.ones((numParticles, ))

        #to visualize progress, uncomment:
        #print(i)

    #show the map
    plt.imshow(MAP['map'], cmap="hot")

    #show the localized path
    posesX = np.ceil(
        (finalPoses[:, 1] - MAP['xmin']) / MAP['res']).astype(np.int16) - 1
    posesY = np.ceil(
        (finalPoses[:, 2] - MAP['ymin']) / MAP['res']).astype(np.int16) - 1
    plt.scatter(posesX, posesY, s=2)

    #show the plot
    plt.show()

    #save the poses and map as a pickle file
    with open('allPoses.pickle', 'wb') as handle:
        pickle.dump(finalPoses, handle, protocol=pickle.HIGHEST_PROTOCOL)

    with open('finalMap.pickle', 'wb') as handle:
        pickle.dump(MAP, handle, protocol=pickle.HIGHEST_PROTOCOL)
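
mapCorrelation comes from the course's map utilities and is not reproduced in this listing. A minimal sketch of the idea, summing map values at the scan endpoints for every candidate (x, y) shift in the search window (the signature is inferred from the call above; treat the body as an assumption):

import numpy as np

def mapCorrelation(im, x_im, y_im, vp, xs, ys):
    # im: log-odds grid; x_im/y_im: physical coordinates of the grid axes
    # vp: 3xN scan endpoints in the world frame; xs/ys: candidate shifts
    nx, ny = im.shape
    xres, yres = x_im[1] - x_im[0], y_im[1] - y_im[0]
    cpr = np.zeros((len(xs), len(ys)))
    for jy, dy in enumerate(ys):
        iy = np.int16(np.round((vp[1, :] + dy - y_im[0]) / yres))
        for jx, dx in enumerate(xs):
            ix = np.int16(np.round((vp[0, :] + dx - x_im[0]) / xres))
            valid = (ix >= 0) & (ix < nx) & (iy >= 0) & (iy < ny)
            # higher sums mean more endpoints land on confidently occupied cells
            cpr[jx, jy] = np.sum(im[ix[valid], iy[valid]])
    return cpr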
Example #24
    #Specify map properties
    resolution = .05
    xmin = -40
    ymin = -40
    xmax = 40
    ymax = 40
    numParticle = 10
    N_threshold = 35
    yRange = np.arange(-resolution, resolution + 0.01, resolution)
    xRange = np.arange(-resolution, resolution + 0.01, resolution)
    depthFolder = 'C:/Users/Baoqian Wang/Google Drive/cam/DEPTH_0'
    rgbFolder = 'C:/Users/Baoqian Wang/Google Drive/cam/RGB_0'
    depthImageList = ld.get_depth(depthFolder)
    rgbImageList = ld.get_rgb(rgbFolder)
    #Initialize ParticleSlam object
    particleSlam = ParticleFilterSlam(numParticle, N_threshold, resolution,
                                      xmin, ymin, xmax, ymax, xRange, yRange)

    # Get lidar data
    lidar = ld.get_lidar("./lidar/train_lidar0")

    # Get the head angles data
    joint = ld.get_joint("./joint/train_joint0")
    IRcalibration = ld.getIRCalib()
    # Run the SLAM
    MAP, bestParticles = particleSlam.slam(lidar, joint, rgbImageList,
                                           depthImageList)

    #fig1 = plt.figure(2)
    #plt.imshow(MAP['show_map'], cmap = "hot")
    #plt.show()
Example #25
        R = rgb_data[k]['image']
        R = np.fliplr(R)
        plt.imshow(R)
        plt.draw()
        plt.pause(0.001)


def get_joint_index(joint):
    jointNames = [
        'Neck', 'Head', 'ShoulderL', 'ArmUpperL', 'LeftShoulderYaw',
        'ArmLowerL', 'LeftWristYaw', 'LeftWristRoll', 'LeftWristYaw2',
        'PelvYL', 'PelvL', 'LegUpperL', 'LegLowerL', 'AnkleL', 'FootL',
        'PelvYR', 'PelvR', 'LegUpperR', 'LegLowerR', 'AnkleR', 'FootR',
        'ShoulderR', 'ArmUpperR', 'RightShoulderYaw', 'ArmLowerR',
        'RightWristYaw', 'RightWristRoll', 'RightWristYaw2', 'TorsoPitch',
        'TorsoYaw', 'l_wrist_grip1', 'l_wrist_grip2', 'l_wrist_grip3',
        'r_wrist_grip1', 'r_wrist_grip2', 'r_wrist_grip3', 'ChestLidarPan'
    ]
    joint_idx = 1
    for (i, jnames) in enumerate(joint):
        if jnames in jointNames:
            joint_idx = i
            break
    return joint_idx


if __name__ == "__main__":
    train_joint3 = ld.get_joint("data/train/train_joint3")
    train_lidar0 = ld.get_lidar("data/train/train_lidar0")
    replay_lidar(train_lidar0)
Example #26
        ly = (np.ceil((laser_Wpose[1][i] - ymin) / 0.05)).astype(np.int16) - 1
        bx = (np.ceil((body_Wpose[0][0] - xmin) / 0.05)).astype(np.int16) - 1
        by = (np.ceil((body_Wpose[1][0] - ymin) / 0.05)).astype(np.int16) - 1

        # use Bresenham's algorithm to get the free cells along the ray
        r = p2.bresenham2D(bx, by, lx, ly)

        # log-odds update: the endpoint cell goes up by log(4);
        # the traced free cells go down, since log(0.8) < 0
        map[lx, ly] += np.log(4)
        map[r[0].astype(np.int16), r[1].astype(np.int16)] += np.log(0.8)

    return map


if __name__ == "__main__":
    l0 = ld.get_lidar("lidar/train_lidar2")
    j0 = ld.get_joint("joint/train_joint2")
    body_Wpose = get_body_pose_inWorld(l0)
    map = get_Occmap(l0, j0, -20, 20, -20, 20, 0.05, 0, body_Wpose[:, 0:1])
    map_index = np.where(map > 0)
    map_indexn = np.where(map < 0)
    map_vi = np.zeros(map.shape)
    map_vi[map_index] = 1
    map_vi[map_indexn] = 0
    plt.imshow(map_vi, cmap="hot")
    plt.title("Occupancy map")
    plt.xlabel('y--0.05m/cell')
    plt.ylabel('x--0.05m/cell')
    plt.show()
    sns.heatmap(map, center=0, vmin=-5, vmax=5)
    plt.xlabel('y--0.05m/cell')
Example #27
import load_data as ld
import numpy as np
from SLAM_helper import *
import matplotlib.pyplot as plt
import MapUtils as maput
import cv2
import random

####Dataset####

joint = ld.get_joint("data/train_joint2")
lid = ld.get_lidar("data/train_lidar2")

itv = 1  # number of scans to skip between drawn map updates

###############

angles = np.array([np.arange(-135, 135.25, 0.25) * np.pi / 180.0])

N, N_threshold = 100, 35
particles = np.zeros((N, 3))
weight = np.ones((N, 1)) / N

mapfig = {}
mapfig['res'] = 0.05
mapfig['xmin'] = -40
mapfig['ymin'] = -40
mapfig['xmax'] = 40
mapfig['ymax'] = 40
mapfig['sizex'] = int(
    np.ceil((mapfig['xmax'] - mapfig['xmin']) / mapfig['res'] + 1))
Example #28
def load_obj(name):
    with open('obj/' + name + '.pkl', 'rb') as f:
        return pickle.load(f)

def roundup(x,t_jump):
    return int(m.ceil(x / t_jump)) * t_jump

video_gen = True

# Set start time and step size
t_start = 0
t_jump = 10

# Load lidar and joint data
lidar_dat = ld.get_lidar('data/test_lidar')
joint_dat = ld.get_joint('data/test_joint')

# theta = np.arange(0,270.25,0.25)*np.pi/float(180)
# ax = plt.subplot(111, projection='polar')
# ax.plot(theta, lidar_dat[0]['scan'][0])
# ax.set_rmax(10)
# ax.set_rticks([0.5, 1, 1.5, 2])  # less radial ticks
# ax.set_rlabel_position(-22.5)  # get radial labels away from plotted line
# ax.grid(True)
# ax.set_title("Lidar scan data", va='bottom')
# plt.show()
# quit()

if video_gen:
    img_dat1 = ld.get_rgb('data/test/RGB_1')
Example #29
def deadReckoning(fileNumber, noise=np.zeros((3, 1))):
    lidarFilename = 'data/train_lidar' + fileNumber
    lidarData = ld.get_lidar(lidarFilename)

    # get the total number of timestamps
    numTimeStamps = len(lidarData)

    #instantiate initial guess of X and of theta
    initPose = np.array(lidarData[0]['pose']).T
    xMinus = np.array([initPose[0][0], initPose[1][0], 0]).T
    thetaMinus = initPose[2][0]

    allPts = np.empty((numTimeStamps,3))

    for i in range(1, numTimeStamps-1):
        #load the data for this timestamp
        dataI = lidarData[i]

        #get the pose at time t
        pose = np.array(dataI['pose']).T
        xPose = pose[0][0]
        yPose = pose[1][0]
        thetaPose = pose[2][0]
        pt = np.array([xPose, yPose,0]).T

        #get the pose at time t+1
        nextPose = np.array(lidarData[i+1]['pose']).T
        nextXPose = nextPose[0][0]
        nextYPose = nextPose[1][0]
        nextThetaPose = nextPose[2][0]
        nextPt = np.array([nextXPose, nextYPose,0]).T

        #get the delta pose and delta thetas
        diffVectors = nextPt - pt
        rotMatrix = rotz(thetaPose)
        deltaX = np.dot(rotMatrix.T,diffVectors)
        deltaTheta = nextThetaPose - thetaPose

        #now go back to global
        xPlus = np.dot(rotz(thetaMinus),deltaX)+ xMinus
        thetaPlus = thetaMinus + deltaTheta
        xNew = xPlus[0]
        yNew = xPlus[1]

        #add the noise
        thetaPlus = thetaPlus + noise[2,0]
        xPlus = xPlus + np.hstack((noise[0:2,0],0))

        #carry the updated pose forward to the next iteration
        thetaMinus = thetaPlus
        xMinus = xPlus

        #record this timestep's pose in allPts
        allPts[i,0] = xNew
        allPts[i,1] = yNew
        allPts[i,2] = thetaMinus

    # plt.scatter(allPts[:,0],allPts[:,1],s=1)
    # plt.show()

    return allPts
Example #30
#x_range=np.arange(-0.2,0.2+0.05,0.05)
#y_range=np.arange(-0.2,0.2+0.05,0.05)  # for training set

x_im = np.arange(MAP['xmin'], MAP['xmax'] + MAP['res'], MAP['res'])
y_im = np.arange(MAP['ymin'], MAP['ymax'] + MAP['res'], MAP['res'])

N = 100  # increase to 200 particles for training set 2 to get a better map
timestep = 100  # a smaller value, e.g. 20, gives a clearer map but takes much longer

trajectory = []

##################################### Read data ########################################

j0 = get_joint("joint/train_joint0")
l0 = get_lidar("lidar/train_lidar0")
#  r0 = get_rgb("cam/RGB_0")
#  d0 = get_depth("cam/DEPTH_0")
#  exIR_RGB = getExtrinsics_IR_RGB()
#  IRCalib = getIRCalib()
#  RGBCalib = getRGBCalib()

# visualize data
#  replay_lidar(l0)
#  replay_rgb(r0)

l0_pose = [l['pose'] for l in l0]
l0_scan = [l['scan'] for l in l0]
j0_angle = j0['head_angles']
j0_neck = j0_angle[0]
j0_head = j0_angle[1]