Code example #1
    def test_get_clusters(self):

        # classes = ['Car', 'Pedestrian', 'Cyclist']
        num_clusters = [2, 1, 1]

        label_cluster_utils = LabelClusterUtils(self.dataset)
        clusters, std_devs = label_cluster_utils.get_clusters()

        # Check that the correct number of clusters is returned
        clusters_per_class = [len(cls_clusters) for cls_clusters in clusters]
        std_devs_per_class = [len(cls_std_devs) for cls_std_devs in std_devs]

        self.assertEqual(clusters_per_class, num_clusters)
        self.assertEqual(std_devs_per_class, num_clusters)

        # Check that text files were saved
        txt_folder_exists = os.path.isdir(monopsr.data_dir() +
                                          '/label_clusters/unittest-kitti')
        self.assertTrue(txt_folder_exists)

        # Calling get_clusters again should read from files
        read_clusters, read_std_devs = label_cluster_utils.get_clusters()

        # Check that read values are the same as generated ones
        np.testing.assert_allclose(np.vstack(clusters),
                                   np.vstack(read_clusters))
        np.testing.assert_allclose(np.vstack(std_devs),
                                   np.vstack(read_std_devs))
Code example #2
File: evaluator_utils.py  Project: weidezhang/monopsr
def run_kitti_native_script_with_low_iou(checkpoint_name, data_split,
                                         kitti_score_threshold, global_step):
    """Runs the low iou kitti native code script."""

    eval_script_dir = monopsr.top_dir(
    ) + '/scripts/offline_eval/kitti_native_eval'
    run_eval_script = eval_script_dir + '/run_eval_low_iou.sh'
    kitti_predictions_dir = monopsr.data_dir() + \
        '/outputs/{}/predictions/kitti_predictions_3d/' \
        '{}/{}/{}'.format(checkpoint_name, data_split,
                          kitti_score_threshold, global_step)
    results_dir = monopsr.top_dir(
    ) + '/scripts/offline_eval/results_low_iou/{}'.format(data_split)
    os.makedirs(results_dir, exist_ok=True)

    # Round this because protobuf encodes default values with full decimal precision
    kitti_score_threshold = round(kitti_score_threshold, 3)

    subprocess.call([
        run_eval_script,
        str(eval_script_dir),
        str(checkpoint_name),
        str(kitti_score_threshold),
        str(global_step),
        str(kitti_predictions_dir),
        str(results_dir),
    ])
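
A brief usage sketch for the helper above. The argument values are placeholders taken from defaults elsewhere in this listing (the score threshold itself is illustrative), and the call assumes run_eval_low_iou.sh exists under scripts/offline_eval/kitti_native_eval:

    # Illustrative call only; argument values are placeholders.
    run_kitti_native_script_with_low_iou(checkpoint_name='monopsr_model_000',
                                         data_split='val',
                                         kitti_score_threshold=0.1,
                                         global_step=80000)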
Code example #3
    def __init__(self, dataset):

        self._dataset = dataset

        self.cluster_split = dataset.cluster_split

        self.data_dir = monopsr.data_dir() + '/label_clusters'
        self.clusters = []
        self.std_devs = []
Code example #4
File: demo_utils.py  Project: weidezhang/monopsr
def get_experiment_info(checkpoint_name):
    exp_output_base_dir = monopsr.data_dir() + '/outputs/' + checkpoint_name

    # Parse experiment config
    config_file = exp_output_base_dir + '/{}.yaml'.format(checkpoint_name)
    config = config_utils.parse_yaml_config(config_file)

    predictions_base_dir = exp_output_base_dir + '/predictions'

    return config, predictions_base_dir
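
A hedged usage sketch of get_experiment_info(). The checkpoint name is the default from run_inference.py below, and the attributes read back are limited to what parse_yaml_config() (code example #5) is shown to set:

    # Assumes <data_dir>/outputs/monopsr_model_000/monopsr_model_000.yaml exists.
    config, predictions_base_dir = get_experiment_info('monopsr_model_000')

    print(config.config_name)      # 'monopsr_model_000'
    print(predictions_base_dir)    # <data_dir>/outputs/monopsr_model_000/predictions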
Code example #5
File: config_utils.py  Project: weidezhang/monopsr
def parse_yaml_config(yaml_path):
    """Parses a yaml config

    Args:
        yaml_path: path to yaml config

    Returns:
        config_obj: config converted to object
    """

    # Add check for duplicate keys in yaml
    yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                         no_duplicates_constructor)

    with open(yaml_path, 'r') as yaml_file:
        config_dict = yaml.load(yaml_file)

    config_obj = config_dict_to_object(config_dict)
    config_obj.config_name = os.path.splitext(os.path.basename(yaml_path))[0]
    config_obj.exp_output_dir = monopsr.data_dir(
    ) + '/outputs/' + config_obj.config_name

    # Prepend data folder to paths
    paths_config = config_obj.train_config.paths_config
    if paths_config.checkpoint_dir is None:
        checkpoint_dir = config_obj.exp_output_dir + '/checkpoints'

        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

        paths_config.checkpoint_dir = checkpoint_dir
    else:
        paths_config.checkpoint_dir = os.path.expanduser(
            paths_config.checkpoint_dir)

    paths_config.logdir = config_obj.exp_output_dir + '/logs'
    paths_config.pred_dir = config_obj.exp_output_dir + '/predictions'

    return config_obj
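
For reference, a small sketch of what a call to parse_yaml_config() yields, based only on the assignments visible above; the yaml path is a placeholder:

    config = parse_yaml_config('/path/to/monopsr_model_000.yaml')  # placeholder path

    # Derived from the file name and data directory:
    #   config.config_name    -> 'monopsr_model_000'
    #   config.exp_output_dir -> <data_dir>/outputs/monopsr_model_000
    # Filled in on the train config:
    #   paths_config.checkpoint_dir  (defaults to exp_output_dir + '/checkpoints')
    #   paths_config.logdir          (exp_output_dir + '/logs')
    #   paths_config.pred_dir        (exp_output_dir + '/predictions')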
Code example #6
File: kitti_dataset.py  Project: weidezhang/monopsr
    def _set_up_directories(self):
        """Sets up data directories."""
        # Setup Directories
        self.rgb_image_dir = self.data_split_dir + '/image_' + str(
            self.cam_idx)
        self.image_2_dir = self.data_split_dir + '/image_2'
        self.image_3_dir = self.data_split_dir + '/image_3'

        self.calib_dir = self.data_split_dir + '/calib'
        self.disp_dir = self.data_split_dir + '/disparity'
        self.planes_dir = self.data_split_dir + '/planes'
        self.velo_dir = self.data_split_dir + '/velodyne'
        self.depth_dir = self.data_split_dir + '/depth_{}_{}'.format(
            self.cam_idx, self.depth_version)
        self.instance_dir = self.data_split_dir + '/instance_{}_{}'.format(
            self.cam_idx, self.instance_version)

        self.mscnn_label_dir = monopsr.data_dir() + \
            '/detections/mscnn/kitti_fmt/{}/merged_{}/data'.format(
                self.data_split, '_'.join(map(str, self.mscnn_thr)))

        if self.has_kitti_labels:
            self.kitti_label_dir = self.data_split_dir + '/label_2'
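
For concreteness, a sketch of how the formatted directory names above resolve. The camera index and version strings are assumed example values, while the merged threshold string follows the [0.2, 0.2, 0.2] default used in run_inference.py below:

    # Assumed example values, shown only to make the string formatting concrete:
    #   cam_idx = 2, depth_version = 'example', instance_version = 'example'
    #
    #   depth_dir       -> <data_split_dir>/depth_2_example
    #   instance_dir    -> <data_split_dir>/instance_2_example
    #   mscnn_label_dir -> <data_dir>/detections/mscnn/kitti_fmt/<data_split>/
    #                      merged_0.2_0.2_0.2/data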
Code example #7
File: trainer.py  Project: weidezhang/monopsr
def train(model, config):
    """Training function for detection models.

    Args:
        model: The detection model object
        config: config object
    """
    print('Training', config.config_name)

    # Get configurations
    model_config = model.model_config
    train_config = config.train_config

    # Create a variable tensor to hold the global step
    global_step_tensor = tf.Variable(0, trainable=False, name='global_step')

    ##############################
    # Get training configurations
    ##############################
    max_iterations = train_config.max_iterations
    summary_interval = train_config.summary_interval
    checkpoint_interval = train_config.checkpoint_interval
    max_checkpoints = train_config.max_checkpoints_to_keep

    paths_config = train_config.paths_config
    logdir = paths_config.logdir
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    checkpoint_dir = paths_config.checkpoint_dir
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    checkpoint_prefix = checkpoint_dir + '/' + model_config.model_type

    global_summaries = set([])

    # The model should return a dictionary of predictions
    print('Building model...')
    output_dict, gt_dict, output_debug_dict = model.build()
    print('Done building model.')

    # summary_histograms = train_config.summary_histograms
    # summary_img_images = train_config.summary_img_images
    # summary_bev_images = train_config.summary_bev_images

    ##############################
    # Setup loss
    ##############################
    losses_dict, total_loss = model.loss(output_dict, gt_dict)

    # Optimizer
    training_optimizer = optimizer_builder.build(train_config.optimizer,
                                                 global_summaries,
                                                 global_step_tensor)

    # Create the train op
    print('Creating train_op')
    with tf.variable_scope('train_op'):
        train_op = slim.learning.create_train_op(
            total_loss,
            training_optimizer,
            clip_gradient_norm=1.0,
            global_step=global_step_tensor)
    print('Done creating train_op')

    # Save checkpoints regularly.
    saver = tf.train.Saver(max_to_keep=max_checkpoints, pad_step_number=True)

    # Add the result of the train_op to the summary
    tf.summary.scalar('training_loss', train_op)

    # Add maximum memory usage summary op
    # These ops can only be run on a device with a GPU, so they are skipped on Travis
    if 'TRAVIS' not in os.environ:
        tf.summary.scalar('bytes_in_use', tf.contrib.memory_stats.BytesInUse())
        tf.summary.scalar('max_bytes', tf.contrib.memory_stats.MaxBytesInUse())

    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
    summary_merged = summary_utils.summaries_to_keep(
        summaries,
        global_summaries,
        # histograms=summary_histograms,
        # input_imgs=summary_img_images,
        # input_bevs=summary_bev_images
    )

    allow_gpu_mem_growth = config.allow_gpu_mem_growth
    if allow_gpu_mem_growth:
        # GPU memory config
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = allow_gpu_mem_growth
        sess = tf.Session(config=sess_config)
    else:
        sess = tf.Session()

    # Create unique folder name using datetime for summary writer
    datetime_str = str(datetime.datetime.now())
    logdir = logdir + '/train'
    train_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,
                                         sess.graph)

    # Create init op
    init = tf.global_variables_initializer()

    # Parse type and location of pretrained weights
    net_config = net_builder.get_net_config(model_config)
    pretrained_weights_type = getattr(net_config, 'pretrained_weights_type',
                                      None)
    if pretrained_weights_type is not None:
        pretrained_weights_dir = os.path.join(
            monopsr.data_dir(), 'pretrained',
            net_config.pretrained_weights_name)
        pretrained_weights_path = tf.train.get_checkpoint_state(
            pretrained_weights_dir).model_checkpoint_path
    else:
        pretrained_weights_path = None

    # Overwrite existing checkpoints or continue from last saved checkpoint
    if train_config.overwrite_checkpoints:
        # Initialize the variables
        sess.run(init)
        if pretrained_weights_type == 'slim':
            # Scope is resnet_v2_50 or resnet_v2_101 or vgg_16
            scope = net_config.pretrained_weights_name[:-11]
            checkpoint_utils.restore_weights_by_scope(sess,
                                                      pretrained_weights_path,
                                                      scope)
        elif pretrained_weights_type == 'obj_detection_api':
            checkpoint_utils.restore_obj_detection_api_weights(
                sess, model, pretrained_weights_path)
        elif pretrained_weights_type == 'all':
            saver.restore(sess, pretrained_weights_path)
        else:
            print('Pre-trained weights are not being used.')
    else:
        # Look for existing checkpoints
        checkpoint_utils.load_checkpoints(checkpoint_dir, saver)
        if len(saver.last_checkpoints) > 0:
            checkpoint_to_restore = saver.last_checkpoints[-1]
            saver.restore(sess, checkpoint_to_restore)
        else:
            # Initialize the variables
            sess.run(init)
            if pretrained_weights_type == 'slim':
                # Scope is either resnet_v2_50 or resnet_v2_101
                scope = net_config.pretrained_weights_name[:-11]
                checkpoint_utils.restore_weights_by_scope(
                    sess, pretrained_weights_path, scope)
            elif pretrained_weights_type == 'obj_detection_api':
                checkpoint_utils.restore_obj_detection_api_weights(
                    sess, model, pretrained_weights_path)
            elif pretrained_weights_type == 'all':
                saver.restore(sess, pretrained_weights_path)
            else:
                print('Pre-trained weights are not being used.')

    # Read the global step if restored
    global_step = tf.train.global_step(sess, global_step_tensor)
    print('Starting from step {} / {}'.format(global_step, max_iterations))

    # Main Training Loop
    last_time = time.time()
    for step in range(global_step, max_iterations + 1):

        # Save checkpoint
        if step % checkpoint_interval == 0:
            global_step = tf.train.global_step(sess, global_step_tensor)

            saver.save(sess,
                       save_path=checkpoint_prefix,
                       global_step=global_step)

            print('{}: Step {} / {}: Checkpoint saved to {}-{:08d}'.format(
                config.config_name, step, max_iterations, checkpoint_prefix,
                global_step))

        # Create feed_dict for this step
        feed_dict, sample_dict = model.create_feed_dict()

        # DEBUG
        # output = sess.run(output_dict, feed_dict=feed_dict)
        # output_debug = sess.run(output_debug_dict, feed_dict=feed_dict)
        # loss_debug = sess.run(losses_dict, feed_dict=feed_dict)

        # Write summaries and train op
        if step % summary_interval == 0:
            current_time = time.time()
            time_elapsed = current_time - last_time
            last_time = current_time

            train_op_loss, summary_out = sess.run([train_op, summary_merged],
                                                  feed_dict=feed_dict)

            print('{}: Step {}: Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.
                  format(config.config_name, step, train_op_loss,
                         time_elapsed))
            train_writer.add_summary(summary_out, step)

        else:
            # Run the train op only
            sess.run(train_op, feed_dict)

    # Close the summary writers
    train_writer.close()
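
A hedged sketch of how train() is driven. How the model object is constructed is not shown in this listing, so it is left as a placeholder rather than guessed at; the config path mirrors the layout used in run_inference.py below:

    # Load the experiment config written alongside the checkpoint outputs.
    config = config_utils.parse_yaml_config(
        monopsr.data_dir() + '/outputs/monopsr_model_000/monopsr_model_000.yaml')

    # Placeholder: a detection model object exposing build(), loss()
    # and create_feed_dict(), built elsewhere from model_config.
    model = ...

    train(model, config)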
Code example #8
File: run_inference.py  Project: weidezhang/monopsr
def main(_):
    parser = argparse.ArgumentParser()

    # Example usage
    # --checkpoint_name='monopsr_model_000'
    # --data_split='test'
    # --ckpt_num='80000'
    # Optional arg:
    # --device=0

    default_checkpoint_name = 'monopsr_model_000'

    default_ckpt_num = 'all'
    default_data_split = 'val'
    default_det_2d_score_thr = [0.2, 0.2, 0.2]
    default_device = '0'

    parser.add_argument('--checkpoint_name',
                        type=str,
                        dest='checkpoint_name',
                        default=default_checkpoint_name,
                        help='Checkpoint name must be specified as a str.')

    parser.add_argument('--data_split',
                        type=str,
                        dest='data_split',
                        default=default_data_split,
                        help='Data split must be specified e.g. val or test')

    parser.add_argument('--ckpt_num',
                        nargs='+',
                        dest='ckpt_num',
                        default=default_ckpt_num,
                        help='Checkpoint number, e.g. 80000')

    parser.add_argument('--det_2d_score_thr',
                        type=float,
                        nargs='+',
                        dest='det_2d_score_thr',
                        default=default_det_2d_score_thr,
                        help='2D detection score thresholds.')

    parser.add_argument('--device',
                        type=str,
                        dest='device',
                        default=default_device,
                        help='CUDA device id')

    args = parser.parse_args()

    experiment_config = args.checkpoint_name + '.yaml'

    # Read the config from the experiment folder
    experiment_config_path = monopsr.data_dir() + '/outputs/' + \
                             args.checkpoint_name + '/' + experiment_config

    config = config_utils.parse_yaml_config(experiment_config_path)

    # Overwrite 2D detection score threshold
    config.dataset_config.mscnn_thr = args.det_2d_score_thr

    # Set CUDA device id
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    # Run inference
    inference(config, args.data_split, args.ckpt_num)