# --- Beispiel #1 (0) ---
def test_case_first_frame():
    """Round-trip KITTI drive-0005 tracklet labels through Tracklet_saver.

    Reads the ground-truth tracklet XML, converts every frame's objects to
    3D boxes, decomposes them into translation/size/rotation, and writes
    them back out under ./test/.
    """
    tracklet_file = os.path.join(cfg.RAW_DATA_SETS_DIR, '2011_09_26',
                                 '2011_09_26_drive_0005_sync',
                                 'tracklet_labels.xml')
    num_frames = 809
    objects = read_objects(tracklet_file, num_frames)

    saver = Tracklet_saver('./test/')

    # object is counted as frame.
    for frame_no in range(num_frames):
        frame = objects[frame_no]
        coordinate_3d, _ = obj_to_gt_boxes3d(frame)

        translation, size, rotation = boxes3d_decompose(coordinate_3d)

        # A frame may contain multiple cars; write one pose per object.
        # BUGFIX: the inner loop previously reused `i`, shadowing the outer
        # frame index — use a distinct name.
        for j in range(len(translation)):
            saver.add_tracklet(frame_no, size[j], translation[j], rotation[j])

    saver.write_tracklet()
def test_case_first_frame():
    """Re-save KITTI drive-0005 ground-truth tracklets via Tracklet_saver.

    For each of the 809 frames, the objects are converted to 3D boxes,
    decomposed, and appended to a tracklet file written to ./test/.
    """
    tracklet_file = os.path.join(cfg.RAW_DATA_SETS_DIR, '2011_09_26', '2011_09_26_drive_0005_sync',
                                 'tracklet_labels.xml')
    num_frames = 809
    objects = read_objects(tracklet_file, num_frames)

    a = Tracklet_saver('./test/')

    # object is counted as frame.
    for frame_no in range(num_frames):
        frame1 = objects[frame_no]
        coordinate_3d_1, _ = obj_to_gt_boxes3d(frame1)

        translation1, size1, rotation1 = boxes3d_decompose(coordinate_3d_1)

        # BUGFIX: the inner loop used to reuse `i` as its index, shadowing
        # the outer frame index; use `obj_idx` instead.  The redundant
        # size/transition/rotation aliases were dropped.
        for obj_idx in range(len(translation1)):
            a.add_tracklet(frame_no, size1[obj_idx], translation1[obj_idx], rotation1[obj_idx])

    a.write_tracklet()
    def run(self, iter):
        """Run one validation step of the tracking RNN and log a TF summary.

        Loads a single frame from the validation set, feeds the ground-truth
        translations plus the carried RNN state into the network, and writes
        the resulting summary at step `iter`.

        Args:
            iter: global step number for the summary writer.  (NOTE: the
                parameter name shadows the builtin `iter`.)
        """
        # load dataset from didi
        train_rgbs, train_tops, train_fronts, train_gt_labels, train_gt_boxes3d, \
        frame_id = self.val_set_loader.load(1)

        # A frame id like '1/15/00000' marks the first frame of a bag; reset
        # the carried recurrent state at each bag boundary.
        # '1/15/00000'.split('/')[2] == '00000':
        if frame_id[0].split('/')[2] == '00000':
            self.last_states_c = np.zeros((1, n_hidden))
            self.last_states_h = np.zeros((1, n_hidden))
            n_history_saved = 0  # NOTE(review): assigned but never used here

        # Decompose ground-truth boxes into translation/size/rotation and
        # build the translation-history input for the RNN.
        trans_gt, size, rotation = boxes3d.boxes3d_decompose(train_gt_boxes3d[0])
        trans_history, _ = get_train_data2(n_max_objects_input, trans_gt)

        feed = {
            self.net_trans_history: trans_history,
            self.net_trans_gt: trans_gt,
            self.net_rnn_states_c: self.last_states_c,
            self.net_rnn_states_h: self.last_states_h,
            IS_TRAIN_PHASE: True
        }

        # NOTE(review): `last_states` is not defined anywhere in this method,
        # and the sess.run below does not fetch the RNN states — this line
        # likely raises NameError (or reads a stale global).  The training
        # loop fetches `net_states` and updates the carried state *after*
        # sess.run; this probably should do the same.  TODO: confirm and fix.
        self.last_states_c, self.last_states_h = last_states.c, last_states.h

        summary, total_loss, trans_reg_loss, error_reg_loss, predict_tans, error_predict, = \
            sess.run([summary_op, net_loss, net_trans_reg_loss, net_score_reg_loss,
                      net_trans_predict, net_score_predict], feed)
        self.summary_writer.add_summary(summary, iter)
# --- Beispiel #4 (0) ---
def pred_and_save(tracklet_pred_dir, dataset, frame_offset=0, log_tag=None, weights_tag=None):
    """Run MV3D detection over `dataset` and write one tracklet file per bag.

    A new Tracklet_saver is opened whenever the bag name embedded in the
    frame id ('<set>/<bag>/<frame>') changes; the previous bag's tracklets
    are flushed first.

    Args:
        tracklet_pred_dir: directory the tracklet XML files are written to.
        dataset: loader providing get_shape() and load().
        frame_offset: only used to bound the loop when `fast_test` is set.
        log_tag, weights_tag: forwarded to mv3d.Predictor.

    Returns:
        Path of the last written tracklet file.
    """
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag, weights_tag=weights_tag)

    # Remembers only the previous frame's bag name (maxlen=1).
    queue = deque(maxlen=1)

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    tracklet = None  # created lazily at the first frame of each bag

    # dataset.size - 1 for in dataset.get_shape(), a frame is used. So it'll omit first frame for prediction,
    # fix this if has more time
    for i in range(dataset.size - 1 if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, frame_id = dataset.load()

        # handling multiple bags.
        current_bag = frame_id.split('/')[1]
        current_frame_num = int(frame_id.split('/')[2])
        prev_tag_bag = None if i == 0 else queue[0]
        if current_bag != prev_tag_bag:
            # Bag changed: flush the finished bag, then start a new saver.
            if i != 0:
                tracklet.write_tracklet()
            tracklet = Tracklet_saver(tracklet_pred_dir, current_bag, exist_ok=True)
        queue.append(current_bag)

        # detection
        boxes3d, probs = predict(top, front, rgb)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        if len(boxes3d) != 0:
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # add one tracklet pose per detected object
            for j in range(len(translation)):
                tracklet.add_tracklet(current_frame_num, size[j], translation[j], rotation[j], probs[j], boxes3d[j])

    tracklet.write_tracklet()
    predict.save_weights(dir=os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
def pred_and_save(tracklet_pred_dir,
                  dataset,
                  generate_video=False,
                  frame_offset=16,
                  log_tag=None,
                  weights_tag=None):
    """Predict 3D boxes per frame, write tracklets, dump debug images and
    optionally an annotated video.

    Args:
        tracklet_pred_dir: output directory for the tracklet XML.
        dataset: loader providing get_shape() and load().
        generate_video: if True, also write annotated frames to output.mp4.
        frame_offset: number of initial frames to skip.
        log_tag, weights_tag: forwarded to mv3d.Predictor.

    Returns:
        Path of the written tracklet file.
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, _ = dataset.load()

        # skip the first frame_offset frames
        frame_num = i - frame_offset
        if frame_num < 0:
            continue

        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image,
                                          boxes3d[:, :, :],
                                          color=(80, 80, 0),
                                          thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image,
                                             boxes3d[:, :, :],
                                             color=(0, 0, 80),
                                             thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                # keep only boxes roughly in front of the car
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j],
                                          rotation[j])

        # compose top view and (resized) camera view side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # BUGFIX: the writer was previously closed inside the loop right after
    # the first written frame, truncating the video; close it once here.
    if generate_video:
        vid_in.close()

    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
# --- Beispiel #6 (0) ---
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False,
                  frame_offset=16, log_tag=None, weights_tag=None):
    """Run MV3D prediction over `dataset`, save tracklets and debug images,
    optionally writing an annotated video.

    Args:
        tracklet_pred_dir: output directory for the tracklet XML.
        dataset: loader providing get_shape() and load(1).
        generate_video: if True, write annotated frames to output.mp4.
        frame_offset: number of initial frames to skip.
        log_tag, weights_tag: forwarded to mv3d.Predictor.

    Returns:
        Path of the written tracklet file.
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag, weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size):

        rgb, top, front, _, _, _ = dataset.load(1)

        # skip the first frame_offset frames
        frame_num = i - frame_offset
        if frame_num < 0:
            continue

        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :], color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                # keep only boxes roughly in front of the car
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j], rotation[j])

        # compose top view and (resized) camera view side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(rgb_image, (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i), new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # BUGFIX: previously vid_in.close() ran inside the loop, truncating the
    # video after the first written frame; close once when done.
    if generate_video:
        vid_in.close()

    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
def pred_and_save(tracklet_pred_dir,
                  dataset,
                  frame_offset=0,
                  log_tag=None,
                  weights_tag=None):
    """Detect 3D boxes for every frame and write one tracklet file per bag.

    The bag name is parsed out of each frame id ('<set>/<bag>/<frame>');
    whenever it changes, the previous bag's tracklets are flushed and a new
    Tracklet_saver is opened.

    Args:
        tracklet_pred_dir: directory the tracklet XML files are written to.
        dataset: loader providing get_shape() and load().
        frame_offset: only used to bound the loop when `fast_test` is set.
        log_tag, weights_tag: forwarded to mv3d.Predictor.

    Returns:
        Path of the last written tracklet file.
    """
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    # Remembers only the previous frame's bag name (maxlen=1).
    queue = deque(maxlen=1)

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    tracklet = None  # created lazily at the first frame of each bag

    # dataset.size - 1 for in dataset.get_shape(), a frame is used. So it'll omit first frame for prediction,
    # fix this if has more time
    for i in range(dataset.size - 1 if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, frame_id = dataset.load()

        # handling multiple bags.
        current_bag = frame_id.split('/')[1]
        current_frame_num = int(frame_id.split('/')[2])
        prev_tag_bag = None if i == 0 else queue[0]
        if current_bag != prev_tag_bag:
            # Bag changed: flush the finished bag, then start a new saver.
            if i != 0:
                tracklet.write_tracklet()
            tracklet = Tracklet_saver(tracklet_pred_dir,
                                      current_bag,
                                      exist_ok=True)
        queue.append(current_bag)

        # detection
        boxes3d, probs = predict(top, front, rgb)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        if len(boxes3d) != 0:
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # add one tracklet pose per detected object
            for j in range(len(translation)):
                tracklet.add_tracklet(current_frame_num, size[j],
                                      translation[j], rotation[j], probs[j],
                                      boxes3d[j])

    tracklet.write_tracklet()
    predict.save_weights(dir=os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
# --- Beispiel #8 (0) ---
# Load one preprocessed didi frame (rgb + top view), run MV3D prediction
# in-process and print the decomposed boxes.
# BUGFIX: renamed `dir` -> `data_dir` (shadowed the builtin `dir`).
data_dir = os.path.join(sys.path[0], "../MV3D/data/preprocessed/didi")

rgb_path = os.path.join(data_dir, "rgb", "1/6_f", "00000.png")
rgb = cv2.imread(rgb_path)
top_path = os.path.join(data_dir, "top", "1/6_f", "00000.npy")
top = np.load(top_path)
front = np.zeros((1, 1), dtype=np.float32)  # front view unused; dummy input


def np_reshape(np_array):
    """Prepend a batch dimension of size 1 (was a lambda; see PEP 8 E731)."""
    return np_array.reshape(1, *(np_array.shape))


top = np_reshape(top)
front = np_reshape(front)
rgb = np_reshape(rgb)

boxes3d, probs = m3.predict(top, front, rgb)
print("predict boxes len=%d" % len(boxes3d))
translation, size, rotation = boxes3d_decompose(boxes3d)
print("test translation: {}".format(translation))
print("test size: {}".format(size))
print("test rotation: {}".format(rotation))


class MioServer(xmlrpc.XMLRPC):
    """
    An example object to be published over XML-RPC; two trivial endpoints.
    """

    def xmlrpc_test0(self):
        # sqrt(2) converted to a native Python float for serialization
        return float(np.sqrt(2))

    def xmlrpc_test1(self):
        print("call xmlrpc_test1")
        # fixed list of [n, -n] pairs
        return [[n, -n] for n in range(1, 4)]
# --- Beispiel #9 (0) ---
# Load a preprocessed didi frame, ship the reshaped views to disk for the
# out-of-process RPC prediction server, then decompose the returned boxes.
# BUGFIX: renamed `dir` -> `data_dir` (shadowed the builtin `dir`).
data_dir = os.path.join(sys.path[0], "../MV3D/data/preprocessed/didi")

rgb_path = os.path.join(data_dir, "rgb", "1/6_f", "00000.png")
rgb = cv2.imread(rgb_path)
top_path = os.path.join(data_dir, "top", "1/6_f", "00000.npy")
top = np.load(top_path)
front = np.zeros((1, 1), dtype=np.float32)  # front view unused; dummy input


def np_reshape(np_array):
    """Prepend a batch dimension of size 1 (was a lambda; see PEP 8 E731)."""
    return np_array.reshape(1, *(np_array.shape))


top_view = np_reshape(top)
front_view = np_reshape(front)
rgb_view = np_reshape(rgb)

start = time.time()
np.save(os.path.join(sys.path[0], "../MV3D/data/", "top.npy"), top_view)
np.save(os.path.join(sys.path[0], "../MV3D/data/", "rgb.npy"), rgb_view)
end = time.time()
print("save npy use time={} seconds".format(end-start))

start = time.time()
boxes3d = rpc.predict()
end = time.time()

if len(boxes3d):
    translation, size, rotation = boxes3d_decompose(np.array(boxes3d))
    print("predict boxes len={} use time={} seconds".format(len(boxes3d),end-start))
    print("test translation: {}".format(translation))
    print("test size: {}".format(size))
    print("test rotation: {}".format(rotation))
# --- Beispiel #10 (0) ---
def _filter_boxes_by_projected_height(boxes, min_height=40, verbose=False):
    """Keep only 3D boxes whose height in the RGB projection is >= min_height px.

    Args:
        boxes: iterable of 3D boxes accepted by box3d.box3d_to_rgb_box.
        min_height: minimum projected height in pixels.
        verbose: if True, print the (signed) projected height of kept boxes.

    Returns:
        numpy array of the boxes that pass the filter.
    """
    kept = []
    for box in boxes:
        project = box3d.box3d_to_rgb_box([box])
        if abs(project[0][0, 1] - project[0][4, 1]) >= min_height:
            if verbose:
                print(project[0][0, 1] - project[0][4, 1])
            kept.append(box)
    return np.array(kept)


def pred_and_save(tracklet_pred_dir,
                  dataset,
                  generate_video=False,
                  frame_offset=16,
                  log_tag=None,
                  weights_tag=None):
    """Predict 3D boxes per frame and write 'pred' and 'gt' tracklet files,
    plus side-by-side debug images and (optionally) a video.

    Both ground-truth and predicted boxes are filtered by projected height
    (>= 40 px) via _filter_boxes_by_projected_height.

    Args:
        tracklet_pred_dir: output directory for both tracklet XML files.
        dataset: loader providing get_shape() and load(size=1).
        generate_video: if True, write annotated frames to output.mp4.
        frame_offset: number of initial frames to skip.
        log_tag, weights_tag: forwarded to mv3d.Predictor.

    Returns:
        (pred_tracklet_path, gt_tracklet_path).
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir, 'pred')
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    gt_tracklet = Tracklet_saver(tracklet_pred_dir, 'gt')

    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape,
                             front_shape,
                             rgb_shape,
                             log_tag=log_tag,
                             weights_tag=weights_tag)

    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    print('dataset.size')
    print(dataset.size)
    # per-frame detection counts (renamed from the misspelled `lenght`)
    length = []
    gt_length = []

    frame_num = 0
    for i in range(dataset.size if not fast_test else frame_offset + 1):

        rgb, top, front, _, _, _ = dataset.load(size=1)

        frame_num = i - frame_offset
        print('frame_num')
        print(frame_num)
        if frame_num < 0:
            continue

        # HACK: hard-coded absolute path to the ground-truth boxes — make
        # this configurable before reusing outside the author's machine.
        gt_boxes3d_tmp = np.load(
            '/home/mohsen/Desktop/MV3D/data/preprocessed/kitti/gt_boxes3d/object3d/test/%05d.npy'
            % i)

        # drop ground-truth boxes whose projected height is below 40 px
        gt_boxes3d = _filter_boxes_by_projected_height(gt_boxes3d_tmp)

        boxes3d_tmp, probs = predict(top, front, rgb)

        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        if len(gt_boxes3d) != 0:
            gt_length.append(len(gt_boxes3d))

            gt_translation, gt_size, gt_rotation = boxes3d_decompose(
                gt_boxes3d[:, :, :])

            # todo: remove it after gtbox is ok
            gt_size[:, 1:3] = gt_size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(gt_translation)):
                gt_tracklet.add_tracklet(frame_num, gt_size[j],
                                         gt_translation[j], gt_rotation[j])

        # drop predicted boxes whose projected height is below 40 px
        boxes3d = _filter_boxes_by_projected_height(boxes3d_tmp, verbose=True)

        print('sizes')
        print(np.size(boxes3d))
        print(gt_boxes3d)
        print(np.size(gt_boxes3d))

        if len(boxes3d) != 0:
            length.append(len(boxes3d))

            top_image = draw_box3d_on_top(top_image,
                                          boxes3d[:, :, :],
                                          color=(80, 80, 0),
                                          thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image,
                                             boxes3d[:, :, :],
                                             color=(0, 0, 80),
                                             thickness=3)

            if len(gt_boxes3d) != 0:
                rgb_image = draw_box3d_on_camera(rgb_image,
                                                 gt_boxes3d[:, :, :],
                                                 color=(0, 80, 0),
                                                 thickness=3)

            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])

            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE

            for j in range(len(translation)):
                tracklet.add_tracklet(frame_num, size[j], translation[j],
                                      rotation[j])

        # compose top view and (resized) camera view side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)

        if generate_video:
            vid_in.writeFrame(new_image)

    # BUGFIX: vid_in.close() used to run inside the loop right after the
    # first frame; close the writer once after the loop.
    if generate_video:
        vid_in.close()

    print(length)
    print(sum(length))
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    print(gt_length)
    print(sum(gt_length))
    gt_tracklet.write_tracklet()

    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path, gt_tracklet.path
        # NOTE(review): this fragment is the body of a training loop whose
        # enclosing function/scope is not visible here; names such as
        # n_hidden, tra_set_loader, sess and the net_* tensors are defined
        # elsewhere — confirm before refactoring.
        last_states_c = np.zeros((1, n_hidden))
        last_states_h = np.zeros((1, n_hidden))

        for i in range(20000):

            # load dataset from didi
            train_rgbs, train_tops, train_fronts, train_gt_labels, train_gt_boxes3d,\
            frame_id = tra_set_loader.load(1)

            # A frame id like '1/15/00000' marks the first frame of a bag;
            # reset the carried RNN state at each bag boundary.
            #'1/15/00000'.split('/')[2] == '00000':
            if frame_id[0].split('/')[2] == '00000':
                last_states_c = np.zeros((1, n_hidden))
                last_states_h = np.zeros((1, n_hidden))
                n_history_saved = 0  # NOTE(review): assigned but unused here

            # ground-truth translations + history input for the RNN
            trans_gt, size, rotation= boxes3d.boxes3d_decompose(train_gt_boxes3d[0])
            trans_history, _ = get_train_data2(n_max_objects_input,trans_gt )


            feed={
                net_trans_history: trans_history,
                net_trans_gt: trans_gt,
                net_rnn_states_c: last_states_c,
                net_rnn_states_h: last_states_h,
                IS_TRAIN_PHASE: True
            }


            # One optimizer step; net_states is fetched so the recurrent
            # state can be carried into the next iteration.
            _, total_loss,trans_reg_loss,error_reg_loss,predict_tans,error_predict,last_states = \
                sess.run([solver_step,net_loss, net_trans_reg_loss, net_score_reg_loss,
                          net_trans_predict, net_score_predict, net_states], feed)
# --- Beispiel #12 (0) ---
def sync_callback(msg1, msg2):
    """ROS callback for a time-synchronized camera image and velodyne scan.

    Builds a top (bird's-eye) view from the point cloud, obtains 3D boxes
    from the RPC prediction server, and publishes them as a MarkerArray.

    Args:
        msg1: image message from /image_raw.
        msg2: point-cloud message from /velodyne_points.
    """
    # msg1: /image_raw   # msg2: /velodyne_points: velodyne_points
    func_start = time.time()
    timestamp1 = msg1.header.stamp.to_nsec()
    print('image_callback: msg : seq=%d, timestamp=%19d' %
          (msg1.header.seq, timestamp1))
    timestamp2 = msg2.header.stamp.to_nsec()
    print('velodyne_callback: msg : seq=%d, timestamp=%19d' %
          (msg2.header.seq, timestamp2))

    # point cloud message -> (N, 4) array of the first four point fields
    arr = msg_to_arr(msg2)
    lidar = np.array([[item[0], item[1], item[2], item[3]] for item in arr])

    camera_image = bridge.imgmsg_to_cv2(msg1, "bgr8")
    print("camera_image is {}".format(camera_image.shape))

    # rasterize the lidar points into a multi-channel top view
    top_view = point_cloud_2_top(lidar,
                                 res=0.2,
                                 zres=0.5,
                                 side_range=(-45, 45),
                                 fwd_range=(-45, 45),
                                 height_range=(-3, 0.5))
    top_image = draw_top_image(top_view[:, :, -1])

    if 0:  # if show the images (debug-only; permanently disabled)
        cemara_show_image = cv2.resize(
            camera_image,
            (camera_image.shape[1] // 2, camera_image.shape[0] // 2))
        top_show_image_width = camera_image.shape[0] // 2
        top_show_image = cv2.resize(
            top_image, (top_show_image_width, top_show_image_width))
        show_image = np.concatenate((top_show_image, cemara_show_image),
                                    axis=1)
        cv2.imshow("top", show_image)
        cv2.waitKey(1)

    # use test data until round2 pipeline is ok
    # NOTE(review): `top`, `front` and `rgb` are not defined in this
    # function; they must be module-level test fixtures or this raises
    # NameError.  The freshly computed top_view/camera_image above are NOT
    # what gets sent to the predictor — confirm this is intentional.
    np_reshape = lambda np_array: np_array.reshape(1, *(np_array.shape))
    top_view = np_reshape(top)
    front_view = np_reshape(front)
    rgb_view = np_reshape(rgb)

    # hand the inputs to the out-of-process predictor via .npy files
    np.save(os.path.join(sys.path[0], "../MV3D/data/", "top.npy"), top_view)
    np.save(os.path.join(sys.path[0], "../MV3D/data/", "rgb.npy"), rgb_view)

    start = time.time()
    boxes3d = rpc.predict()
    end = time.time()
    print("predict boxes len={} use predict time: {} seconds.".format(
        len(boxes3d), end - start))

    if len(boxes3d) > 0:
        translation, size, rotation = boxes3d_decompose(np.array(boxes3d))
        # publish (boxes3d) to tracker_node
        markerArray = MarkerArray()
        for i in range(len(boxes3d)):
            m = Marker()
            m.type = Marker.CUBE
            m.header.frame_id = "velodyne"
            m.header.stamp = msg2.header.stamp
            m.scale.x, m.scale.y, m.scale.z = size[i][0], size[i][1], size[i][
                2]
            m.pose.position.x, m.pose.position.y, m.pose.position.z = \
                translation[i][0], translation[i][1], translation[i][2]
            # NOTE(review): pose.orientation is a quaternion, but it is
            # filled with what look like Euler-style rotation components and
            # w=0 — almost certainly not a valid orientation; verify against
            # the boxes3d_decompose output before relying on it.
            m.pose.orientation.x, m.pose.orientation.y, m.pose.orientation.z, m.pose.orientation.w = \
                rotation[i][0], rotation[i][1], rotation[i][2], 0.
            m.color.a, m.color.r, m.color.g, m.color.b = \
                1.0, 0.0, 1.0, 0.0
            markerArray.markers.append(m)
        pub.publish(markerArray)

    func_end = time.time()
    print("sync_callback use {} seconds".format(func_end - func_start))