Example #1
    def get_fname_from_label(strings):
        """
        files/32beams_dingdianlukou_2018-03-12-11-02-41/32beams_dingdianlukou_2018-03-12-11-02-41_0.pcd
        files/32_daxuecheng_01803191740/32_daxuecheng_01803191740_1.pcd
        files/32_gaosulu_test/32_gaosulu_test_1.pcd
        files/xuanchuan/xuanchuan_200.pcd
        :param strings: a label line that may contain a .pcd file path
        :return: the matched file name with the leading 'files/' stripped
        """

        regulars = [
            r'files/32_gaosulu_test/32_gaosulu_test_\d+\.pcd',
            r'files/32_daxuecheng_01803191740/32_daxuecheng_01803191740_\d+\.pcd',
            r'files/32beams_dingdianlukou_2018-03-12-11-02-41/32beams_dingdianlukou_2018-03-12-11-02-41_\d+\.pcd',
            r'files/xuanchuan/xuanchuan_\d+\.pcd',
            r'files/p3_beihuan_B16_11803221546/p3_beihuan_B16_11803221546_\d+\.pcd',
            r'files/32_yuanqu_11804041320/32_yuanqu_11804041320_\d+\.pcd',
        ]  # TODO: add more regular expressions
        for pattern in regulars:
            res = re.findall(pattern, strings)
            if len(res) != 0:
                if len(res) == 1:
                    return res[0][6:]  # strip the leading 'files/' prefix
                else:
                    print red(
                        'File->dataset_sti, function->get_fname_from_label:\n  the regular expression matched more than one qualified file name, string: {}'
                        .format(strings))
                    exit(22)
        print red(
            'File->dataset_sti, function->get_fname_from_label: no legal file name found in string: {}'
            .format(strings))
        exit(23)
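
A minimal usage sketch for this helper (assumptions: `re` is imported and `red` is the repo's terminal-color helper; the function is shown here called as a plain function):

    import re  # get_fname_from_label relies on `re` and the repo's `red` color helper

    label_line = '... files/xuanchuan/xuanchuan_200.pcd ...'
    # a single match is returned with the 6-character 'files/' prefix stripped:
    print get_fname_from_label(label_line)  # -> xuanchuan/xuanchuan_200.pcd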
Example #2
def checkArgs(Args):

    # print('Using config:')
    # pprint.pprint(cfg)

    print yellow("Checking the args ...")

    Args.fine_tune = (Args.fineTune == 'True')
    Args.use_demo = (Args.useDemo == 'True')

    if Args.method == 'test':
        if Args.weights is None:
            print red("  Specify the testing network weights!")
            sys.exit(3)
        else:
            print blue("  Test the weight: \n    {}".format(Args.weights))
    elif Args.fine_tune:
        if Args.weights is None:
            print red("  Specify the finetune network weights!")
            sys.exit(4)
        else:
            print blue("  Finetune the weight:  {}".format(Args.weights))
    else:
        print red("  The network will RE-TRAIN from empty ! ! ")
    print '  Called with args:', args
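
A minimal sketch of the argument parsing this function expects (the flag names are inferred from the attribute accesses above and are assumptions; `checkArgs` compares against the literal string 'True', hence the string-valued boolean flags):

    import argparse

    parser = argparse.ArgumentParser(description='training entry (sketch)')
    parser.add_argument('--method', default='train', choices=['train', 'test'])
    parser.add_argument('--weights', default=None, help='path to checkpoint weights')
    parser.add_argument('--fineTune', default='False', choices=['True', 'False'])
    parser.add_argument('--useDemo', default='False', choices=['True', 'False'])
    args = parser.parse_args()
    checkArgs(args)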
Example #3
    def load_sti_annotation(self):
        total_box_labels, total_fnames, total_object_labels, total_height_labels = [], [], [], []
        for index, folder in enumerate(self.folder_list):
            print(green('  Process the folder {}'.format(folder)))
            #  TODO: note that the result.txt file in shrink_box_label_bk contains illegal numbers like: "x":"-1.#IND00","y":"-1.#IND00","z":"-1.#IND00"
            label_fname = path_add(self.data_path, folder, 'label',
                                   'result.txt')
            pixel_label_folder = path_add(self.data_path, folder, 'label_rect')
            box_label, files_names, one_object_label, one_height_label = [], [], [], []
            with open(label_fname, 'r') as f:
                frames = f.readlines()
            for idx__, one_frame in enumerate(frames):  # one frame of the sequence
                one_frame = one_frame.replace('unknown', '0.0').replace('smallMot', '1.0').replace('bigMot', '2.0') \
                    .replace('nonMot', '3.0').replace('pedestrian', '4.0').replace('dontcare', '0.0')
                object_str = one_frame.translate(None,
                                                 '\"').split('position:{')[1:]
                label_in_frame = []
                if idx__ % 150 == 0:
                    print("    Process is going on {}/{} ".format(
                        idx__, len(frames)))
                for obj in object_str:
                    f_str_num = re.findall(r'[-+]?\d+\.\d+', obj)
                    f_num = map(float, f_str_num)
                    if len(f_num) == 11:  # filter out malformed labels such as 'type: position'
                        label_in_frame.append(f_num)
                    else:  # malformed label
                        print(
                            red('    There is an illegal label (length: {}) in result.txt at frame {} in folder {}; it has been dropped'
                                .format(len(f_num), idx__, folder)))
                        print f_num
                        # print one_frame

                label_in_frame_np = np.array(label_in_frame,
                                             dtype=np.float32).reshape(-1, 11)
                if label_in_frame_np.shape[0] == 0:
                    print(
                        red('    Frame {} in folder {} is empty and has been dropped'
                            .format(idx__, folder)))
                    continue
                if len(np.where(label_in_frame_np[:, 9] != 0)[0]) == 0:
                    print(
                        red('    Frame {} in folder {} contains no object and has been dropped'
                            .format(idx__, folder)))
                    continue
                box_label.append(label_in_frame_np[:, (
                    0, 1, 2, 6, 7, 8, 3,
                    9)])  # keep the useful columns: x, y, z, l, w, h, theta, type
                files_names.append(self.get_fname_from_label(one_frame))

            print("    Loading .npy labels ... ")
            for file_ in sorted(os.listdir(pixel_label_folder),
                                key=lambda name: int(name[0:-4])):
                data_matrix = np.load(path_add(pixel_label_folder, file_))
                one_object_label.append(data_matrix[:, :, 0:1])  # TODO:check
                one_height_label.append(data_matrix[:, :, 1:2])
            assert len(one_object_label) == len(files_names), \
                "Mismatch between the number of .npy labels and file names when generating the dataset in dataset.py"
            total_box_labels.extend(box_label)
            total_fnames.extend(files_names)
            total_object_labels.extend(one_object_label)
            total_height_labels.extend(one_height_label)
            print("  Completing loading {} is done!  ".format(folder))

        print("  Zip data in one dict ... ")
        return_dataset = [
            {
                'files_name': total_fnames[i],
                'boxes_labels': total_box_labels[i],
                'object_labels': total_object_labels[i],
                'height_labels': total_height_labels[i]
            } for i in range(len(total_fnames))
        ]

        print("  Total number of frames is {}".format(len(total_fnames)))
        return return_dataset
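
A short consumption sketch for the returned list (the `DataSetTrain` constructor is taken from Example #4; the per-frame dict keys come from the code above, the shape comments are assumptions based on how the labels are built):

    dataset = DataSetTrain()
    frames = dataset.load_sti_annotation()
    for frame in frames[:3]:
        print frame['files_name']
        print frame['boxes_labels'].shape   # (N, 8): x, y, z, l, w, h, theta, type
        print frame['object_labels'].shape  # per-pixel object-class channel from label_rect
        print frame['height_labels'].shape  # per-pixel height channel from label_rect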
Example #4
    # rospy.init_node('node_labels')
    # label_pub = rospy.Publisher('labels', MarkerArray, queue_size=100)
    # point_pub = rospy.Publisher('points', PointCloud, queue_size=100)
    # rospy.loginfo('Ros begin ...')
    # while True:
    #     blobs = dataset.get_minibatch(idx)
    #     pointcloud = PointCloud_Gen(blobs["lidar3d_data"], frameID='rslidar')
    #     label_box = Boxes_labels_Gen(blobs["boxes_labels"], ns='test_box')
    #     label_pub.publish(label_box)
    #     point_pub.publish(pointcloud)
    #     rospy.loginfo('Send {} frame'.format(idx))
    #     idx += 1

    dataset = DataSetTrain()
    print red('Generating the dataset is done!')

    # name = '/home/hexindong/Videos/Apoxel-Server/RSdata32b/32_gaosulu_test/pcd/32_gaosulu_test_435.pcd'
    # a = dataset.check_name_get_data(name)
    # print(yellow('Convert {} data into pkl file ...').format(dataset.training_rois_length))
    for idx in range(dataset.training_rois_length):
        blobs = dataset.get_minibatch(idx)
        name = blobs['serial_num']
        points = blobs['grid_stack']
        a = 0
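        # NOTE (assumption): 'a = 0' is only a breakpoint anchor for inspecting `blobs` in a debugger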
        # np.save('/home/hexindong/he/Apoxel-Server/32_yuanqu_11804041320_152.npy',points)
        # exit()
        # data_pkl_name = os.path.join(cfg.DATA_DIR,name.split('/')[0],'data_pkl',name.split('/')[1][:-4]+'.pkl')
        # with open(data_pkl_name, 'wb') as fid:
        #     cPickle.dump(blobs, fid, cPickle.HIGHEST_PROTOCOL)
        #     print '  Wrote data_pkl to {}'.format(data_pkl_name)
Example #5
    def training(self, sess):
        with tf.name_scope('loss_cube'):
            cube_score = self.network.cube_score
            cube_label = self.network.cube_label

            if self.arg.focal_loss:
                alpha = [1.0, 1.0]
                gamma = 2
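                # Focal loss (Lin et al., 2017): FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t);
                # the one-hot mask below selects p_t for the true class and cfg.EPS guards log(0)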
                cube_probi = tf.nn.softmax(cube_score)
                tmp = tf.one_hot(cube_label, depth=2) * (
                    (1 - cube_probi)**
                    gamma) * tf.log([cfg.EPS, cfg.EPS] + cube_probi) * alpha
                cube_cross_entropy = tf.reduce_mean(
                    -tf.reduce_sum(tmp, axis=1))
            else:
                cube_probi = tf.nn.softmax(cube_score)  # use for debug
                tmp = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=cube_score, labels=cube_label)
                cube_cross_entropy = tf.reduce_mean(tmp)

            loss = cube_cross_entropy

        with tf.name_scope('train_op'):
            global_step = tf.Variable(1, trainable=False, name='Global_Step')
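            # exponential decay: lr = arg.lr * 0.90 ** (global_step / 1000.0); staircase
            # defaults to False, so the learning rate decays smoothly at every step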
            lr = tf.train.exponential_decay(self.arg.lr,
                                            global_step,
                                            1000,
                                            0.90,
                                            name='decay-Lr')
            train_op = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(
                loss, global_step=global_step)

        with tf.name_scope('train_cubic'):
            extractor_int = self.network.extractor_int
            extractor_float = self.network.extractor_weighs_float
            extractor_outs = self.network.extractor_outs  #(160, 30, 30, 15, 32)
            # extractor_F_grad = tf.gradients(loss, extractor_float)
            # extractor_Int_grad = tf.gradients(loss, extractor_int)
            # conv1_grad = tf.gradients(loss, self.network.conv1)
            # conv2_grad = tf.gradients(loss, self.network.conv2)
            # conv3_grad = tf.gradients(loss, self.network.conv3)
            # fc1_grad = tf.gradients(loss, self.network.fc1)
            # fc2_grad = tf.gradients(loss, self.network.fc2)
            watch_data_idx = 0
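            # collapse the last axis of one input cube into a single-channel 30x30
            # image so that tf.summary.image can display it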
            inputs_cube = tf.reshape(
                tf.reduce_sum(tf.squeeze(
                    self.network.cube_input[watch_data_idx, ...]),
                              axis=-1,
                              keep_dims=True), [-1, 30, 30, 1])
            tf.summary.image('extractor_int',
                             tf.reshape(extractor_int, [1, 27, -1, 1]))
            data0_kernel0_outs = tf.transpose(
                tf.reshape(extractor_outs[0, :, :, 2, :], [1, 30, 30, -1]),
                [3, 1, 2, 0])
            data0_kernel1_outs = tf.transpose(
                tf.reshape(extractor_outs[1, :, :, 2, :], [1, 30, 30, -1]))
            data0_kernel2_outs = tf.transpose(
                tf.reshape(extractor_outs[2, :, :, 2, :], [1, 30, 30, -1]))
            data0_kernel3_outs = tf.transpose(
                tf.reshape(extractor_outs[3, :, :, 2, :], [1, 30, 30, -1]))

            tf.summary.image('extractor_inputs_cube', inputs_cube)
            tf.summary.image('extractor_outs1',
                             data0_kernel0_outs,
                             max_outputs=50)
            # tf.summary.image('extractor_outs2', data0_kernel1_outs,max_outputs=50)
            # tf.summary.image('extractor_outs3', data0_kernel2_outs,max_outputs=50)
            # tf.summary.image('extractor_outs2', data0_kernel3_outs,max_outputs=50)

            # tf.summary.image('extractor_two', tf.reshape(tf.transpose(extractor_int),[32,9,3,1]))
            # tf.summary.image('extractor_float', tf.reshape(extractor_float, [-1, 27, 32, 1]))
            # tf.summary.image('conv1_kernel', tf.reshape(self.network.conv1[0], [-1, 27, 32, 1]), max_outputs=3)
            # tf.summary.image('conv2_kernel', tf.reshape(self.network.conv2[0], [-1, 27, 64, 1]), max_outputs=3)
            # tf.summary.image('conv3_kernel', tf.reshape(self.network.conv3[0], [-1, 27, 128, 1]), max_outputs=3)
            #
            # tf.summary.histogram('float_grad', extractor_F_grad)
            # tf.summary.histogram('Int_grad', extractor_Int_grad)
            # tf.summary.histogram('conv1_grad', conv1_grad[0])
            # tf.summary.histogram('conv2_grad', conv2_grad[0])
            # tf.summary.histogram('conv3_grad', conv3_grad[0])
            # tf.summary.histogram('fc1_grad', fc1_grad[0])
            # tf.summary.histogram('fc2_grad', fc2_grad[0])

            tf.summary.scalar('total_loss', loss)
            glb_var = tf.global_variables()
            # for var in glb_var:
            # tf.summary.histogram(var.name, var)
            merged_op = tf.summary.merge_all()

        with tf.name_scope('valid_cubic'):
            epoch_cubic_recall = tf.placeholder(dtype=tf.float32)
            cubic_recall_smy_op = tf.summary.scalar('cubic_recall',
                                                    epoch_cubic_recall)
            epoch_cubic_precise = tf.placeholder(dtype=tf.float32)
            cubic_precise_smy_op = tf.summary.scalar('cubic_precise',
                                                     epoch_cubic_precise)

            epoch_extractor_occupy = tf.placeholder(dtype=tf.float32)
            cubic_occupy_smy_op = tf.summary.scalar('extractor_occupy',
                                                    epoch_extractor_occupy)

            valid_summary_op = tf.summary.merge([
                cubic_recall_smy_op, cubic_precise_smy_op, cubic_occupy_smy_op
            ])

        with tf.name_scope('load_weights'):
            sess.run(tf.global_variables_initializer())
            if self.arg.weights is not None:
                self.network.load_weigths(self.arg.weights, sess, self.saver)
                print 'Loading pre-trained model weights from {:s}'.format(
                    red(self.arg.weights))
            else:
                print 'The network will be {} from default initialization!'.format(
                    yellow('re-trained'))
        timer = Timer()
        if DEBUG:
            vispy_init()
        cube_label_gt = np.concatenate(
            (np.ones([self.arg.batch_size]),
             np.zeros([self.arg.batch_size]))).astype(np.int32)
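        # ground-truth labels are fixed by construction: the first batch_size cubes are
        # positive (1) and the second batch_size are negative (0), matching the
        # np.vstack((data_batchP, data_batchN)) order used below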
        train_epoch_cnt = int(self.dataset.train_positive_cube_cnt /
                              self.arg.batch_size / 2)
        training_series = range(train_epoch_cnt)
        for epo_cnt in range(self.arg.epoch_iters):
            for data_idx in training_series:
                iter = global_step.eval()
                timer.tic()
                series = self.train_series_Gen(self.arg.batch_size, 'train')
                data_batchP = self.dataset.get_minibatch(series[0],
                                                         data_type='train',
                                                         classify='positive')
                data_batchN = self.dataset.get_minibatch(series[1],
                                                         data_type='train',
                                                         classify='negative')
                data_batch = np.vstack((data_batchP, data_batchN))
                timer.toc()
                time1 = timer.average_time

                timer.tic()
                if self.arg.use_aug_data_method:
                    data_aug = self.cube_augmentation(data_batch,
                                                      aug_data=True,
                                                      DEBUG=False)
                else:
                    data_aug = data_batch
                timer.toc()
                time2 = timer.average_time

                if DEBUG:
                    a = data_batch[data_idx].sum()
                    b = data_aug[data_idx].sum()  # compare point counts before/after augmentation
                    if a != b:
                        print 'Some points were lost during augmentation'
                    else:
                        print 'points cnt: ', a
                    box_np_view(data_aug[data_idx],
                                data_aug[data_idx + self.arg.batch_size])
                feed_dict = {
                    self.network.cube_input: data_aug,
                    self.network.cube_label: cube_label_gt,
                }
                timer.tic()
                extractor_outs_,extractor_int_, extractor_float_, cube_probi_, cube_label_, loss_, merge_op_, _ = \
                    sess.run([extractor_outs, extractor_int, extractor_float, cube_probi, cube_label, loss, merged_op,
                              train_op], feed_dict=feed_dict)
                timer.toc()
                # print extractor_outs_.shape,"Look here!"
                if iter % 4 == 0:
                    predict_result = cube_probi_.argmax(axis=1)
                    one_train_hist = fast_hist(cube_label_gt, predict_result)
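                    # fast_hist returns a 2x2 confusion matrix hist[gt, pred], so
                    # precision = hist[1, 1] / (hist[1, 1] + hist[0, 1]) and
                    # recall    = hist[1, 1] / (hist[1, 1] + hist[1, 0])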
                    occupy_part_pos = (extractor_int_.reshape(-1) == 1.0).astype(float).sum() / extractor_int_.size
                    occupy_part_neg = (extractor_int_.reshape(-1) == -1.0).astype(float).sum() / extractor_int_.size
                    print 'Training step: {:3d} loss: {:.4f} occupy: +{}% vs -{}% inference_time: {:.3f} '. \
                        format(iter, loss_, int(occupy_part_pos * 100), int(occupy_part_neg * 100), timer.average_time)
                    # print('    class bg precision = {:.3f}  recall = {:.3f}'.format(
                    #     (one_train_hist[0, 0] / (one_train_hist[0, 0] + one_train_hist[1, 0] + 1e-6)),
                    #     (one_train_hist[0, 0] / (one_train_hist[0, 0] + one_train_hist[0, 1] + 1e-6))))
                    print '    class car precision = {:.3f}  recall = {:.3f}'.format(
                        (one_train_hist[1, 1] /
                         (one_train_hist[1, 1] + one_train_hist[0, 1] + 1e-6)),
                        (one_train_hist[1, 1] /
                         (one_train_hist[1, 1] + one_train_hist[1, 0] +
                          1e-6))), '\n'
                    if socket.gethostname() == "szstdzcp0325" and False:  # debug dump, intentionally disabled
                        with self.printoptions(precision=2,
                                               suppress=False,
                                               linewidth=10000):
                            print 'scores: {}'.format(cube_probi_[:, 1])
                            print 'divine:', str(predict_result)
                            print 'labels:', str(cube_label_), '\n'

                if cfg.TRAIN.TENSORBOARD:  # iter % 1 == 0 is always true, so log every step
                    self.writer.add_summary(merge_op_, iter)

                if (iter % 3000 == 0
                        and cfg.TRAIN.DEBUG_TIMELINE) or iter == 200:
                    if socket.gethostname() == "szstdzcp0325":
                        run_options = tf.RunOptions(
                            trace_level=tf.RunOptions.FULL_TRACE)
                        run_metadata = tf.RunMetadata()
                        _ = sess.run([cube_score],
                                     feed_dict=feed_dict,
                                     options=run_options,
                                     run_metadata=run_metadata)
                        # chrome://tracing
                        trace = timeline.Timeline(
                            step_stats=run_metadata.step_stats)
                        trace_file = open(
                            cfg.LOG_DIR + '/' + 'training-step-' +
                            str(iter).zfill(7) + '.ctf.json', 'w')
                        trace_file.write(
                            trace.generate_chrome_trace_format(
                                show_memory=False))
                        trace_file.close()

            if epo_cnt % 10 == 0 and cfg.TRAIN.EPOCH_MODEL_SAVE:
                self.snapshot(sess, epo_cnt)
            if cfg.TRAIN.USE_VALID:
                with tf.name_scope('valid_cubic_' + str(epo_cnt + 1)):
                    print 'Validate the net at the end of epoch_{} ...'.format(
                        epo_cnt + 1)
                    hist = np.zeros((cfg.NUM_CLASS, cfg.NUM_CLASS),
                                    dtype=np.float32)
                    valid_epoch_cnt = int(
                        self.dataset.valid_positive_cube_cnt /
                        self.arg.batch_size / 2)
                    for data_idx in range(valid_epoch_cnt):
                        series = self.train_series_Gen(self.arg.batch_size,
                                                       'valid')
                        data_batchP = self.dataset.get_minibatch(
                            series[0], data_type='valid', classify='positive')
                        data_batchN = self.dataset.get_minibatch(
                            series[1], data_type='valid', classify='negative')
                        data_batch = np.vstack((data_batchP, data_batchN))

                        feed_dict_ = {
                            self.network.cube_input: data_batch,
                            self.network.cube_label: cube_label_gt,
                        }
                        valid_cls_score_ = sess.run(cube_score,
                                                    feed_dict=feed_dict_)

                        valid_result = valid_cls_score_.argmax(axis=1)
                        one_hist = fast_hist(cube_label_gt, valid_result)
                        hist += one_hist
                        if cfg.TRAIN.VISUAL_VALID:
                            print 'Valid step: {:d}/{:d}'.format(
                                data_idx + 1, valid_epoch_cnt)
                            print(
                                '    class bg precision = {:.3f}  recall = {:.3f}'
                                .format(
                                    (one_hist[0, 0] /
                                     (one_hist[0, 0] + one_hist[1, 0] + 1e-6)),
                                    (one_hist[0, 0] /
                                     (one_hist[0, 0] + one_hist[0, 1] + 1e-6)))
                            )
                            print(
                                '    class car precision = {:.3f}  recall = {:.3f}'
                                .format(
                                    (one_hist[1, 1] /
                                     (one_hist[1, 1] + one_hist[0, 1] + 1e-6)),
                                    (one_hist[1, 1] /
                                     (one_hist[1, 1] + one_hist[1, 0] + 1e-6)))
                            )
                        if data_idx % 20 == 0 and cfg.TRAIN.TENSORBOARD:
                            pass
                            # train_writer.add_summary(valid_result_, data_idx/20+epo_cnt*1000)
                valid_extractor_int_ = sess.run(extractor_int)
                extractor_occupy = valid_extractor_int_.sum() / valid_extractor_int_.size
                precise_total = hist[1, 1] / (hist[1, 1] + hist[0, 1] + 1e-6)
                recall_total = hist[1, 1] / (hist[1, 1] + hist[1, 0] + 1e-6)
                valid_res = sess.run(valid_summary_op,
                                     feed_dict={
                                         epoch_cubic_recall: recall_total,
                                         epoch_cubic_precise: precise_total,
                                         epoch_extractor_occupy:
                                         extractor_occupy
                                     })
                self.writer.add_summary(valid_res, epo_cnt + 1)
                print 'Validation of epoch_{}: cubic_precision = {:.3f}  cubic_recall = {:.3f}' \
                    .format(epo_cnt + 1, precise_total, recall_total)
            self.shuffle_series()
        print yellow('The training process is done, enjoy every day!')
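
A minimal invocation sketch (the trainer class name `CubicNetTrain` and its constructor arguments are assumptions; the method above only requires an object holding a network, a dataset, args, a saver and a summary writer):

    import tensorflow as tf

    trainer = CubicNetTrain(arg=parsed_args)  # hypothetical constructor
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        trainer.training(sess)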