Example #1
def conf_thresholding(data_dir, save_dir):
	# collect all trajectories
	tracker_id_score = dict()
	eval_dir = os.path.join(data_dir, 'data')
	seq_list, num_seq = load_list_from_folder(eval_dir)
	for seq_file in seq_list:
		seq_data, num_line = load_txt_file(seq_file)
		for data_line in seq_data:
			data_split = data_line.split(' ')
			score_tmp = float(data_split[-1])
			id_tmp = int(data_split[1])

			if id_tmp not in tracker_id_score:
				tracker_id_score[id_tmp] = list()
			tracker_id_score[id_tmp].append(score_tmp)

	# collect the ID to remove based on the confidence
	to_delete_id = list()
	for track_id, score_list in tracker_id_score.items():
		average_score = sum(score_list) / float(len(score_list))
		if average_score < score_threshold:
			to_delete_id.append(track_id)

	# remove the ID in the data folder
	save_dir_tmp = os.path.join(save_dir, 'data'); mkdir_if_missing(save_dir_tmp)
	for seq_file in seq_list:
		seq_name = fileparts(seq_file)[1]
		seq_file_save = os.path.join(save_dir_tmp, seq_name+'.txt'); seq_file_save = open(seq_file_save, 'w')

		seq_data, num_line = load_txt_file(seq_file)
		for data_line in seq_data:
			data_split = data_line.split(' ')
			id_tmp = int(data_split[1])
			if id_tmp not in to_delete_id:
				seq_file_save.write(data_line + '\n')
	
		seq_file_save.close()

	# remove the ID in the trk with id folder
	trk_id_dir = os.path.join(data_dir, 'trk_withid')
	seq_dir_list, num_seq = load_list_from_folder(trk_id_dir)
	save_dir_tmp = os.path.join(save_dir, 'trk_withid')
	for seq_dir in seq_dir_list:
		frame_list, num_frame = load_list_from_folder(seq_dir)
		seq_name = fileparts(seq_dir)[1]
		save_frame_dir = os.path.join(save_dir_tmp, seq_name); mkdir_if_missing(save_frame_dir)
		for frame in frame_list:
			frame_index = fileparts(frame)[1]
			frame_file_save = os.path.join(save_frame_dir, frame_index+'.txt'); frame_file_save = open(frame_file_save, 'w')	
			frame_data, num_line = load_txt_file(frame)
			for data_line in frame_data:
				data_split = data_line.split(' ')
				id_tmp = int(data_split[-1])
				if id_tmp not in to_delete_id:
					frame_file_save.write(data_line + '\n')

			frame_file_save.close()
def vis(result_sha, data_root, result_root):
	def show_image_with_boxes(img, objects_res, object_gt, calib, save_path, height_threshold=0):
		img2 = np.copy(img) 

		for obj in objects_res:
			box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
			color_tmp = tuple([int(tmp * 255) for tmp in colors[obj.id % max_color]])
			img2 = draw_projected_box3d(img2, box3d_pts_2d, color=color_tmp)
			text = 'ID: %d' % obj.id
			if box3d_pts_2d is not None:
				img2 = cv2.putText(img2, text, (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=color_tmp) 

		img = Image.fromarray(img2)
		img = img.resize((width, height))
		img.save(save_path)
	
	for seq in seq_list:
		image_dir = os.path.join(data_root, 'image_02/%s' % seq)
		calib_file = os.path.join(data_root, 'calib/%s.txt' % seq)
		result_dir = os.path.join(result_root, '%s/trk_withid/%s' % (result_sha, seq))
		save_3d_bbox_dir = os.path.join(result_dir, '../../trk_image_vis/%s' % seq); mkdir_if_missing(save_3d_bbox_dir)

		# load the list
		images_list, num_images = load_list_from_folder(image_dir)
		print('number of images to visualize is %d' % num_images)
		start_count = 0
		for count in range(start_count, num_images):
			image_tmp = images_list[count]
			if not is_path_exists(image_tmp): 
				count += 1
				continue
			image_index = int(fileparts(image_tmp)[1])
			image_tmp = np.array(Image.open(image_tmp))
			img_height, img_width, img_channel = image_tmp.shape

			result_tmp = os.path.join(result_dir, '%06d.txt'%image_index)		# load the result
			if not is_path_exists(result_tmp): object_res = []
			else: object_res = read_label(result_tmp)
			print('processing index: %d, %d/%d, results from %s' % (image_index, count+1, num_images, result_tmp))
			calib_tmp = Calibration(calib_file)			# load the calibration

			object_res_filtered = []
			for object_tmp in object_res:
				if object_tmp.type not in type_whitelist: continue
				if hasattr(object_tmp, 'score'):
					if object_tmp.score < score_threshold: continue
				center = object_tmp.t
				object_res_filtered.append(object_tmp)

			num_instances = len(object_res_filtered)
			save_image_with_3dbbox_gt_path = os.path.join(save_3d_bbox_dir, '%06d.jpg' % (image_index))
			show_image_with_boxes(image_tmp, object_res_filtered, [], calib_tmp, save_path=save_image_with_3dbbox_gt_path)
			print('number of objects to plot is %d' % (num_instances))
			count += 1
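The two functions above lean on a handful of file I/O helpers that AB3DMOT imports from the Xinshuo_PyToolbox package (load_list_from_folder, load_txt_file, fileparts, mkdir_if_missing, is_path_exists), plus a module-level score_threshold. Below is a minimal sketch of stand-ins with the behavior the snippets assume; the return values and tuple order are inferred from how the helpers are used here, not taken from the library itself.

import glob
import os

def mkdir_if_missing(path):
    # create the directory (and any parents) if it does not exist yet
    os.makedirs(path, exist_ok=True)

def is_path_exists(path):
    return os.path.exists(path)

def fileparts(path):
    # split '/tmp/0001.txt' into ('/tmp', '0001', '.txt')
    directory = os.path.dirname(path)
    name, ext = os.path.splitext(os.path.basename(path))
    return directory, name, ext

def load_list_from_folder(folder):
    # sorted list of paths inside a folder, plus its length
    path_list = sorted(glob.glob(os.path.join(folder, '*')))
    return path_list, len(path_list)

def load_txt_file(path):
    # non-empty, stripped lines of a text file, plus the line count
    with open(path, 'r') as f:
        lines = [line.strip() for line in f if line.strip()]
    return lines, len(lines)

score_threshold = 0.5  # assumed value; the real threshold is configured elsewhere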
Example #3
        if (len(ret) > 0):
            # x, y, z, theta, l, w, h, ID, other info, confidence
            return np.concatenate(ret), trks
        return np.empty((0, 15)), trks


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: python main.py result_sha(e.g., car_3d_det_test)")
        sys.exit(1)

    result_sha = sys.argv[1]
    save_root = '/home/eshan/AB3DMOT/results'

    det_id2str = {1: 'Pedestrian', 2: 'Car', 3: 'Cyclist'}
    seq_file_list, num_seq = load_list_from_folder(
        os.path.join('/home/eshan/AB3DMOT/data/KITTI', result_sha))
    total_time = 0.0
    total_frames = 0
    save_dir = os.path.join(save_root, result_sha)
    mkdir_if_missing(save_dir)
    eval_dir = os.path.join(save_dir, 'data')
    mkdir_if_missing(eval_dir)
    for seq_file in seq_file_list:
        _, seq_name, _ = fileparts(seq_file)
        mot_tracker = AB3DMOT()
        seq_dets = np.loadtxt(seq_file, delimiter=',')  # load detections
        eval_file = os.path.join(eval_dir, seq_name + '.txt')
        eval_file = open(eval_file, 'w')
        save_trk_dir = os.path.join(save_dir, 'trk_withid', seq_name)
        mkdir_if_missing(save_trk_dir)
        print("Processing %s." % (seq_name))
Example #4
        if (len(ret) > 0):
            return np.concatenate(
                ret)  # x, y, z, theta, l, w, h, ID, other info, confidence
        return np.empty((0, 15))


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: python main.py result_sha(e.g., car_3d_det_test)")
        sys.exit(1)

    result_sha = sys.argv[1]
    save_root = './results'

    det_id2str = {1: 'Pedestrian', 2: 'Car', 3: 'Cyclist'}
    seq_file_list, num_seq = load_list_from_folder(
        os.path.join('data/KITTI', result_sha))
    total_time = 0.0
    total_frames = 0
    save_dir = os.path.join(save_root, result_sha)
    mkdir_if_missing(save_dir)
    eval_dir = os.path.join(save_dir, 'data')
    mkdir_if_missing(eval_dir)
    for seq_file in seq_file_list:
        _, seq_name, _ = fileparts(seq_file)
        mot_tracker = AB3DMOT()
        seq_dets = np.loadtxt(seq_file, delimiter=',')  # load detections
        eval_file = os.path.join(eval_dir, seq_name + '.txt')
        eval_file = open(eval_file, 'w')
        save_trk_dir = os.path.join(save_dir, 'trk_withid', seq_name)
        mkdir_if_missing(save_trk_dir)
        print("Processing %s." % (seq_name))
Example #5
def vis(result_sha, data_root, result_root):
    def show_image_with_boxes(img,
                              velo,
                              objects_res,
                              objects_res_det,
                              objects_res_raw,
                              labeldata,
                              object_gt,
                              calib,
                              save_path,
                              height_threshold=0,
                              show_lidar=True,
                              save_image=False):
        img2 = np.copy(img)

        for obj in objects_res:
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[obj.id % max_color]])
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(0, 0, 255))
            text = 'Tracked ID: %d, Type: %s' % (obj.id, obj.type)
            if box3d_pts_2d is not None:
                img2 = cv2.putText(
                    img2,
                    text,
                    (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX,
                    0.5,
                    color=(0, 0, 255))
        for obj in objects_res_det:
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            color_tmp = tuple(
                [int(tmp * 255) for tmp in colors[obj.id % max_color]])
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(0, 255, 0))
            text = 'Detection ID: %d, Type: %s' % (obj.id, obj.type)
            if box3d_pts_2d is not None:
                img2 = cv2.putText(
                    img2,
                    text,
                    (int(box3d_pts_2d[3, 0]), int(box3d_pts_2d[3, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX,
                    0.5,
                    color=(0, 255, 0))
        import itertools
        labeldata, labeldata2 = itertools.tee(labeldata)
        for obj in labeldata:
            # print("here")
            box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
            img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(255, 0, 0))
            text = 'GT, Type: %s' % (obj.type)
            if box3d_pts_2d is not None:
                # print("also")
                print(text)
                img2 = cv2.putText(
                    img2,
                    text,
                    (int(box3d_pts_2d[4, 0]), int(box3d_pts_2d[4, 1]) - 8),
                    cv2.FONT_HERSHEY_TRIPLEX,
                    0.5,
                    color=(255, 0, 0))
        # for obj in objects_res_raw:
        #     box3d_pts_2d, _ = compute_box_3d(obj, calib.P)
        #     color_tmp = tuple([int(tmp * 255)
        #                        for tmp in colors[obj.id % max_color]])
        #     img2 = draw_projected_box3d(img2, box3d_pts_2d, color=(255,0,0))
        #     text = 'Estimate ID: %d' % obj.id
        #     if box3d_pts_2d is not None:
        #         img2 = cv2.putText(img2, text, (int(box3d_pts_2d[2, 0]), int(
        #             box3d_pts_2d[2, 1]) - 8), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color=(255,0,0))
        if show_lidar:
            show_lidar_with_boxes(velo,
                                  objects=labeldata2,
                                  calib=calib,
                                  objects_pred=objects_res)
        img = Image.fromarray(img2)
        img = img.resize((width, height))
        cv2.imshow("Image", img2)
        cv2.waitKey()

        if save_image:
            print("Saving Image at", save_path)
            img.save(save_path)

        return img2

    for seq in seq_list:
        image_dir = os.path.join(data_root, 'image_02/%s' % seq)
        calib_file = os.path.join(data_root, 'calib/%s.txt' % seq)
        label_file = os.path.join(data_root, 'label_02/%s.txt' % seq)
        velo_dir = os.path.join(data_root, 'velodyne/%s' % seq)
        result_dir = [
            os.path.join(result_root,
                         '%s/trk_withid/%s' % (result_sha[0], seq)),
            os.path.join(result_root,
                         '%s/trk_withid/%s' % (result_sha[1], seq)),
            os.path.join(result_root,
                         '%s/trk_withid/%s' % (result_sha[2], seq))
        ]
        save_3d_bbox_dir = os.path.join(
            result_root,
            '%s/trk_image_vis/%s' % ("Combined_Final_WithLabel", seq))
        mkdir_if_missing(save_3d_bbox_dir)

        # load the list
        images_list, num_images = load_list_from_folder(image_dir)
        velo_list, num_velo = load_list_from_folder(velo_dir)
        print('number of images to visualize is %d' % num_images)
        start_count = 0
        filecontent = np.array([f.split() for f in open(label_file, 'r')])
        # alllabels = np.unique(filecontent[:,2])
        # labels = ['Car', 'Pedestrian', 'Cyclist']
        # finallabelset = [x for x in alllabels if x not in labels]
        # print(alllabels)
        # print(finallabelset)
        # for val in finallabelset:
        #     filecontent = filecontent[filecontent[:,2]!=val,:]
        # print(np.unique(filecontent[:,2]))
        size = (width, height)
        out = cv2.VideoWriter(f'{result_root}/{seq}.avi',
                              cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
        num_images = 1  # note: hard-coded here, so only the first frame is visualized
        for count in range(start_count, num_images):
            image_tmp = images_list[count]
            velo_tmp = velo_list[count]
            if not is_path_exists(image_tmp):
                count += 1
                continue
            image_index = int(fileparts(image_tmp)[1])
            image_tmp = np.array(Image.open(image_tmp))
            img_height, img_width, img_channel = image_tmp.shape
            filecontentframe = filecontent[filecontent[:, 0] ==
                                           str(image_index), :]
            print(len(filecontentframe))
            print(f"Labels for frame {image_index}",
                  np.unique(filecontentframe[:, 2]))
            labeldata = (Object3d(getstringfromarray(line[2:]))
                         for line in filecontentframe)
            object_res = []
            object_res_det = []
            object_res_raw = []
            for dirt in result_dir:
                result_tmp = os.path.join(dirt, '%06d.txt' %
                                          image_index)  # load the result
                if is_path_exists(result_tmp):
                    object_res = object_res + read_label(result_tmp)
                result_tmp_det = os.path.join(dirt, 'det%06d.txt' %
                                              image_index)  # load the result
                if is_path_exists(result_tmp_det):
                    object_res_det = object_res_det + \
                        read_label(result_tmp_det)
                result_tmp_raw = os.path.join(dirt, 'raw%06d.txt' %
                                              image_index)  # load the result
                if is_path_exists(result_tmp_raw):
                    object_res_raw = object_res_raw + \
                        read_label(result_tmp_raw)
            print('processing index: %d, %d/%d, results from %s' %
                  (image_index, count + 1, num_images, result_tmp))
            calib_tmp = Calibration(calib_file)  # load the calibration

            object_res_filtered = []
            for object_tmp in object_res:
                if object_tmp.type not in type_whitelist:
                    continue
                if hasattr(object_tmp, 'score'):
                    if object_tmp.score < score_threshold:
                        continue
                center = object_tmp.t
                object_res_filtered.append(object_tmp)
            object_res_filtered_det = []
            for object_tmp in object_res_det:
                if object_tmp.type not in type_whitelist:
                    continue
                if hasattr(object_tmp, 'score'):
                    if object_tmp.score < score_threshold:
                        continue
                center = object_tmp.t
                object_res_filtered_det.append(object_tmp)
            object_res_filtered_raw = []
            for object_tmp in object_res_raw:
                if object_tmp.type not in type_whitelist:
                    continue
                # if hasattr(object_tmp, 'score'):
                #     if object_tmp.score < score_threshold:
                #         continue
                center = object_tmp.t
                object_res_filtered_raw.append(object_tmp)
            num_instances = len(object_res_filtered)
            save_image_with_3dbbox_gt_path = os.path.join(
                save_3d_bbox_dir, '%06d.jpg' % (image_index))
            velodyne_scan = load_velo_scan(velo_tmp, np.float32, n_vec=4)[:,
                                                                          0:4]
            img = show_image_with_boxes(
                image_tmp,
                velodyne_scan,
                object_res_filtered,
                object_res_filtered_det,
                object_res_filtered_raw,
                labeldata, [],
                calib_tmp,
                save_path=save_image_with_3dbbox_gt_path)
            print('number of objects to plot is %d, %d, %d' %
                  (num_instances, len(object_res_filtered_det),
                   len(object_res_filtered_raw)))
            count += 1
            out.write(img)
        out.release()
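vis() in this example also depends on module-level configuration (seq_list, width, height, colors, max_color, type_whitelist, score_threshold) and, since result_sha is indexed with [0], [1] and [2], expects a list of three result names. A hedged sketch of how it might be configured and called; every concrete value below is a placeholder, not a value from the repository.

import numpy as np
import matplotlib.pyplot as plt

seq_list = ['0000']                        # KITTI tracking sequences to visualize
width, height = 1242, 375                  # typical KITTI image resolution
max_color = 30
colors = plt.cm.hsv(np.linspace(0, 1, max_color))[:, :3].tolist()  # one RGB triple per ID bucket
type_whitelist = ['Car', 'Pedestrian', 'Cyclist']
score_threshold = 0.5

vis(['car_results', 'ped_results', 'cyc_results'],    # three hypothetical result folders
    data_root='data/KITTI/training',                  # expects image_02/, calib/, label_02/, velodyne/
    result_root='./results')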