def main(argv=None):
    imgs = data.Preprocess()[8]
    # # Single-step prediction on the last group only
    # i = 1350
    # image1 = [imgs[i - 3], imgs[i - 2], imgs[i - 1]]
    # image2 = [imgs[i - 72], imgs[i - 48], imgs[i - 24]]
    # image3 = [imgs[i - 484], imgs[i - 336], imgs[i - 168]]
    # # Merge into a multi-channel image
    # merged1 = cv2.merge([image1[0], image1[1], image1[2]])
    # merged1 = merged1.reshape(1, size, size, 3)
    # merged2 = cv2.merge([image2[0], image2[1], image2[2]])
    # merged2 = merged2.reshape(1, size, size, 3)
    # merged3 = cv2.merge([image3[0], image3[1], image3[2]])
    # merged3 = merged3.reshape(1, size, size, 3)
    # # Single-step prediction of the next time step
    # predict_y = evaluate(merged1, merged2, merged3)
    # for predict in predict_y:
    #     print((412640 * predict).astype(int))

    test_x1, test_x2, test_x3 = [], [], []
    for i in range(1350, 1353):
        # Take the closeness (short-term), period, and trend components
        image1 = [imgs[i - 3], imgs[i - 2], imgs[i - 1]]
        image2 = [imgs[i - 72], imgs[i - 48], imgs[i - 24]]
        image3 = [imgs[i - 484], imgs[i - 336], imgs[i - 168]]
        # Merge each component into a multi-channel image
        merged1 = cv2.merge([image1[0], image1[1], image1[2]])
        merged2 = cv2.merge([image2[0], image2[1], image2[2]])
        merged3 = cv2.merge([image3[0], image3[1], image3[2]])
        test_x1.append(merged1)
        test_x2.append(merged2)
        test_x3.append(merged3)
    predict_y = evaluate(test_x1, test_x2, test_x3)
    # print(predict_y)
    print(412640 * predict_y)
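# --- Illustration only, not part of the original repo ----------------------
# The loop above builds three inputs per time step i: a "closeness" group from
# the most recent frames (i-3..i-1), a "period" group from what look like
# daily offsets (i-24, i-48, i-72), and a "trend" group from longer offsets
# (i-168, i-336, i-484). A minimal helper with the same indexing, assuming
# `imgs` is a sequence of size x size single-channel grids (the name
# `build_sample` is hypothetical):
import cv2


def build_sample(imgs, i, size):
    closeness = cv2.merge([imgs[i - 3], imgs[i - 2], imgs[i - 1]])
    period = cv2.merge([imgs[i - 72], imgs[i - 48], imgs[i - 24]])
    trend = cv2.merge([imgs[i - 484], imgs[i - 336], imgs[i - 168]])
    # Add a leading batch dimension so each component is (1, size, size, 3)
    return (closeness.reshape(1, size, size, 3),
            period.reshape(1, size, size, 3),
            trend.reshape(1, size, size, 3))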
def main(argv=None):
    train_x1, train_x2, train_x3, test_x1, test_x2, test_x3, train_y, test_y = data.Preprocess()[:8]
    print('train size:%s, test size:%s' % (len(train_x1), len(test_x1)))
    # Mini-batches of 32 images each
    batch_size = 32
    num_batch = len(train_x1) // batch_size
    print()
    # Train the network
    train(train_x1, train_x2, train_x3, test_x1, test_x2, test_x3, train_y,
          test_y, batch_size, num_batch)
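# --- Sketch only; the repo's actual train() may batch differently ----------
# Shows how batch_size and num_batch above would carve the three components
# into mini-batches (the name `iterate_minibatches` is hypothetical):
import numpy as np


def iterate_minibatches(train_x1, train_x2, train_x3, train_y, batch_size):
    num_batch = len(train_x1) // batch_size
    for b in range(num_batch):
        s, e = b * batch_size, (b + 1) * batch_size
        # One mini-batch of the closeness / period / trend inputs and targets
        yield (np.asarray(train_x1[s:e]), np.asarray(train_x2[s:e]),
               np.asarray(train_x3[s:e]), np.asarray(train_y[s:e]))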
def main(argv=None):
    # Internal conditions (closeness / period / trend components)
    train_x1, train_x2, train_x3, test_x1, test_x2, test_x3, train_y, test_y = data.Preprocess()[:8]
    # External conditions
    train_ex, test_ex = data.external()
    print('train size:%s, test size:%s' % (len(train_x1), len(test_x1)))
    # Mini-batches of 32 images each
    batch_size = 32
    num_batch = len(train_x1) // batch_size
    # Train the network
    train(train_x1, train_x2, train_x3, test_x1, test_x2, test_x3, train_y,
          test_y, batch_size, num_batch, train_ex, test_ex)
def __init__(self,
             tags,
             queue_size=20,
             require_shuffle=False,
             require_log=False,
             is_testset=False,
             n_skip_frames=0,
             random_num=666,
             is_flip=False):
    self.is_testset = is_testset
    self.shuffled = require_shuffle
    self.random_num = random_num

    self.preprocess = data.Preprocess()
    self.raw_img = Image()
    self.raw_tracklet = Tracklet()
    self.raw_lidar = Lidar()

    # Skip some frames
    self.tags = [
        tag for i, tag in enumerate(tags) if i % (n_skip_frames + 1) == 0
    ]
    self.is_flip = is_flip
    if self.shuffled:
        self.tags = shuffle(self.tags, random_state=self.random_num)
    self.tag_index = 0
    self.size = len(self.tags)
    self.require_log = require_log
    self.flip_axis = 1  # if axis=1, flip across y=0; if axis=0, flip across x=0
    self.flip_rate = 2  # a flip_rate of 2 means flip every second frame

    self.cache_size = queue_size
    self.loader_need_exit = Value('i', 0)

    if use_thread:
        self.prepr_data = []
        self.lodaer_processing = threading.Thread(target=self.loader)
    else:
        self.preproc_data_queue = Queue()
        self.buffer_blocks = [
            Array('h', 41246691) for i in range(queue_size)
        ]
        self.blocks_usage = Array('i', range(queue_size))
        self.lodaer_processing = Process(target=self.loader)
    self.lodaer_processing.start()
def __init__(self,
             dir_path,
             dates_to_drivers=None,
             indice=None,
             cache_num=10,
             is_testset=False):
    self.dates_to_drivers = dates_to_drivers
    self.indice = indice
    self.cache_num = cache_num
    self.preprocess_path = dir_path
    self.is_testset = is_testset

    self.preprocess = data.Preprocess()
    self.raw_img = Image()
    self.raw_tracklet = Tracklet()
    self.raw_lidar = Lidar()

    # load_file_names looks like 1_15_1490991691546439436 for didi or
    # 2012_09_26_0005_00001 for kitti.
    if indice is None:
        self.load_file_names = self.get_all_load_index(
            self.preprocess_path, self.dates_to_drivers, is_testset)
        self.tags = self.raw_img.get_tags()
    else:
        # self.load_file_names = indice
        self.load_file_names = self.get_specific_load_index(
            indice, self.preprocess_path, self.dates_to_drivers, is_testset)
        self.load_once = True
    self.size = len(self.tags)

    # self.shuffled_file_names = shuffle(self.load_tags, random_state=1)
    # Current index into shuffled_file_names
    self.batch_start_index = 0
    # num_frame_used counts how many frames of the current batch have been
    # consumed; once all of them are used, another batch is loaded.
    self.num_frame_used = cache_num

    # Contents of the current batch
    self.train_rgbs = []
    self.train_tops = []
    self.train_fronts = []
    self.train_gt_labels = []
    self.train_gt_boxes3d = []
    self.current_batch_file_names = []
def __init__(self,
             bags,
             tags,
             queue_size=20,
             require_shuffle=False,
             require_log=False,
             is_testset=False):
    self.is_testset = is_testset
    self.shuffled = require_shuffle

    self.preprocess = data.Preprocess()
    self.raw_img = Image()
    self.raw_tracklet = Tracklet()
    self.raw_lidar = Lidar()

    self.bags = bags
    # Get all tags
    self.tags = tags
    if self.shuffled:
        self.tags = shuffle(self.tags)
    self.tag_index = 0
    self.size = len(self.tags)
    self.require_log = require_log

    self.cache_size = queue_size
    self.loader_need_exit = Value('i', 0)

    if use_thread:
        self.prepr_data = []
        self.lodaer_processing = threading.Thread(target=self.loader)
    else:
        self.preproc_data_queue = Queue()
        self.buffer_blocks = [
            Array('h', 41246691) for i in range(queue_size)
        ]
        self.blocks_usage = Array('i', range(queue_size))
        self.lodaer_processing = Process(target=self.loader)
    self.lodaer_processing.start()
            o.translation = translation
            o.rotation = rotation
            o.size = tracklet.size
            objects[frames_index.index(i + start_frame)].append(o)
    return objects


if __name__ == '__main__':
    import data
    import net.utility.draw as draw
    from sklearn.utils import shuffle

    preprocess = data.Preprocess()
    raw_img = Image()
    raw_tracklet = Tracklet()
    raw_lidar = Lidar()

    tags = shuffle(raw_tracklet.get_tags())

    os.makedirs(os.path.join(config.cfg.LOG_DIR, 'test', 'rgb'), exist_ok=True)
    os.makedirs(os.path.join(config.cfg.LOG_DIR, 'test', 'top'), exist_ok=True)

    for one_frame_tag in tags:
        # Load the annotations, RGB image, and lidar point cloud for this frame
        objs = raw_tracklet.load(one_frame_tag)
        rgb = raw_img.load(one_frame_tag)
        lidar = raw_lidar.load(one_frame_tag)