Example #1
    def test_path_drop_weights(self):
        # Tests the effect of path drop on the network's feature maps.
        # Sets up a minimal training loop and compares the image
        # feature maps before and after running the 'train_op' while
        # path drop is in effect.

        train_val_test = 'train'
        # Overwrite the training iterations
        self.train_config.max_iterations = 2
        self.train_config.overwrite_checkpoints = True

        # Overwrite path drop probabilities
        model_config = config_builder.proto_to_obj(self.model_config)
        model_config.path_drop_probabilities = [0.0, 0.8]

        with tf.Graph().as_default():
            # Set a graph-level seed
            tf.set_random_seed(1245)
            model = RpnModel(model_config,
                             train_val_test=train_val_test,
                             dataset=self.dataset)
            prediction_dict = model.build()
            losses_dict, total_loss = model.loss(prediction_dict)

            global_summaries = set([])
            # Optimizer
            training_optimizer = optimizer_builder.build(
                self.train_config.optimizer, global_summaries)
            train_op = slim.learning.create_train_op(total_loss,
                                                     training_optimizer)

            init_op = tf.global_variables_initializer()

            with tf.Session() as sess:
                sess.run(init_op)
                for step in range(1, self.train_config.max_iterations):
                    feed_dict = model.create_feed_dict()
                    if step == 1:
                        current_feature_maps = sess.run(model.img_feature_maps,
                                                        feed_dict=feed_dict)
                        exp_feature_maps = current_feature_maps
                    train_op_loss = sess.run(train_op, feed_dict=feed_dict)
                    print('Step {}, Total Loss {:0.3f} '.format(
                        step, train_op_loss))

                    updated_feature_maps = sess.run(model.img_feature_maps,
                                                    feed_dict=feed_dict)
            # The feature maps should have remained the same since
            # the image path was dropped
            np.testing.assert_array_almost_equal(updated_feature_maps,
                                                 exp_feature_maps,
                                                 decimal=4)
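
Why the assertion above can hold (a minimal standalone TF1 sketch, not part of the AVOD codebase; `x` and the 3.0 scaling are made-up stand-ins): with path_drop_probabilities = [0.0, 0.8] the image path is always dropped, and multiplying a branch's output by a 0.0 mask zeroes the gradients that flow back through it, so the optimizer leaves that branch's weights alone.

import tensorflow as tf

x = tf.Variable([[1.0, 2.0]])     # stand-in for image-branch weights
features = x * 3.0                # stand-in for the image feature maps
mask = tf.constant(0.0)           # image path dropped
loss = tf.reduce_sum(features * mask)
grad = tf.gradients(loss, x)[0]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grad))         # [[0. 0.]] -- no update reaches the masked branch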
Example #2
    def __init__(self, model_config, train_val_test, dataset):
        """
        Args:
            model_config: configuration for the model
            train_val_test: "train", "val", or "test"
            dataset: the dataset that will provide samples and ground truth
        """

        # Sets model configs (_config)
        super(AvodModel, self).__init__(model_config)

        self.dataset = dataset

        # Dataset config
        self._num_final_classes = self.dataset.num_classes + 1

        # Input config
        input_config = self._config.input_config
        self._bev_pixel_size = np.asarray(
            [input_config.bev_dims_h, input_config.bev_dims_w])
        self._bev_depth = input_config.bev_depth

        self._img_pixel_size = np.asarray(
            [input_config.img_dims_h, input_config.img_dims_w])
        self._img_depth = [input_config.img_depth]

        # AVOD config
        avod_config = self._config.avod_config
        self._proposal_roi_crop_size = \
            [avod_config.avod_proposal_roi_crop_size] * 2
        self._positive_selection = avod_config.avod_positive_selection
        self._nms_size = avod_config.avod_nms_size
        self._nms_iou_threshold = avod_config.avod_nms_iou_thresh
        self._path_drop_probabilities = self._config.path_drop_probabilities
        self._box_rep = avod_config.avod_box_representation

        if self._box_rep not in [
                'box_3d', 'box_8c', 'box_8co', 'box_4c', 'box_4ca'
        ]:
            raise ValueError('Invalid box representation', self._box_rep)

        # Create the RpnModel
        self._rpn_model = RpnModel(model_config, train_val_test, dataset)

        if train_val_test not in ["train", "val", "test"]:
            raise ValueError('Invalid train_val_test value, '
                             'should be one of ["train", "val", "test"]')
        self._train_val_test = train_val_test
        self._is_training = (self._train_val_test == 'train')

        self.sample_info = {}
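
For reference, a minimal sketch of the configuration fields this constructor reads directly (using types.SimpleNamespace as a stand-in for the protobuf-built config; every value below is illustrative rather than taken from a real AVOD config, and the RpnModel and base-class constructors read additional fields not shown here):

from types import SimpleNamespace

input_config = SimpleNamespace(
    bev_dims_h=700, bev_dims_w=800, bev_depth=6,
    img_dims_h=360, img_dims_w=1200, img_depth=3)

avod_config = SimpleNamespace(
    avod_proposal_roi_crop_size=7,
    avod_positive_selection='not_bkg',
    avod_nms_size=100,
    avod_nms_iou_thresh=0.01,
    avod_box_representation='box_4ca')

model_config_sketch = SimpleNamespace(
    input_config=input_config,
    avod_config=avod_config,
    path_drop_probabilities=[0.9, 0.9])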
Example #3
def inference(model_config, eval_config,
              dataset_config, data_split,
              ckpt_indices):

    # Overwrite the defaults
    dataset_config = config_builder.proto_to_obj(dataset_config)

    dataset_config.data_split = data_split
    dataset_config.data_split_dir = 'training'
    if data_split == 'test':
        dataset_config.data_split_dir = 'testing'

    eval_config.eval_mode = 'test'
    eval_config.evaluate_repeatedly = False

    dataset_config.has_labels = False
    # Enable this to see the actual GPU memory being used
    eval_config.allow_gpu_mem_growth = True

    eval_config = config_builder.proto_to_obj(eval_config)
    # Grab the checkpoint indices to evaluate
    eval_config.ckpt_indices = ckpt_indices

    # Remove augmentation during evaluation in test mode
    dataset_config.aug_list = []

    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Setup the model
    model_name = model_config.model_name
    # Overwrite repeated field
    model_config = config_builder.proto_to_obj(model_config)
    # Switch path drop off during evaluation
    model_config.path_drop_probabilities = [1.0, 1.0]

    with tf.Graph().as_default():
        if model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=eval_config.eval_mode,
                              dataset=dataset)
        elif model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=eval_config.eval_mode,
                             dataset=dataset)
        else:
            raise ValueError('Invalid model name {}'.format(model_name))

        model_evaluator = Evaluator(model, dataset_config, eval_config)
        model_evaluator.run_latest_checkpoints()
Example #4
    def test_rpn_loss(self):
        # Use "val" so that the first sample is loaded each time
        rpn_model = RpnModel(self.model_config,
                             train_val_test="val",
                             dataset=self.dataset)

        predictions = rpn_model.build()

        loss, total_loss = rpn_model.loss(predictions)

        feed_dict = rpn_model.create_feed_dict()

        with self.test_session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            loss_dict_out = sess.run(loss, feed_dict=feed_dict)
            print('Losses ', loss_dict_out)
Example #5
    def test_disable_path_drop(self):
        # Tests that path drop is disabled when the
        # probabilities are set to 1.0.

        train_val_test = 'train'
        # Overwrite path drop probabilities
        model_config = config_builder.proto_to_obj(self.model_config)
        model_config.path_drop_probabilities = [1.0, 1.0]

        with tf.Graph().as_default():
            model = RpnModel(model_config,
                             train_val_test=train_val_test,
                             dataset=self.dataset)
            model.build()
            # These attributes are only set when path drop is enabled;
            # with no path drop, they should not exist
            self.assertFalse(hasattr(model, 'img_path_drop_mask'))
            self.assertFalse(hasattr(model, 'bev_path_drop_mask'))
Example #6
def train(model_config, train_config, dataset_config):

    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    train_val_test = 'train'
    model_name = model_config.model_name

    with tf.Graph().as_default():
        if model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=train_val_test,
                             dataset=dataset)
        elif model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=train_val_test,
                              dataset=dataset)
        else:
            raise ValueError('Invalid model_name')

        trainer.train(model, train_config)
Example #7
def evaluate(model_config, eval_config, dataset_config):

    # Parse eval config
    eval_mode = eval_config.eval_mode
    if eval_mode not in ['val', 'test']:
        raise ValueError('Evaluation mode can only be set to `val` or `test`')
    evaluate_repeatedly = eval_config.evaluate_repeatedly

    # Parse dataset config
    data_split = dataset_config.data_split
    if data_split == 'train':
        dataset_config.data_split_dir = 'training'
        dataset_config.has_labels = True

    elif data_split.startswith('val'):
        dataset_config.data_split_dir = 'training'

        # Don't load labels for val split when running in test mode
        if eval_mode == 'val':
            dataset_config.has_labels = True
        elif eval_mode == 'test':
            dataset_config.has_labels = False

    elif data_split == 'test':
        dataset_config.data_split_dir = 'testing'
        dataset_config.has_labels = False

    else:
        raise ValueError('Invalid data split', data_split)

    # Convert to object to overwrite repeated fields
    dataset_config = config_builder.proto_to_obj(dataset_config)

    # Remove augmentation during evaluation
    dataset_config.aug_list = []

    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Setup the model
    model_name = model_config.model_name

    # Convert to object to overwrite repeated fields
    model_config = config_builder.proto_to_obj(model_config)

    # Switch path drop off during evaluation
    model_config.path_drop_probabilities = [1.0, 1.0]

    with tf.Graph().as_default():
        if model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=eval_mode,
                              dataset=dataset)
        elif model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=eval_mode,
                             dataset=dataset)
        else:
            raise ValueError('Invalid model name {}'.format(model_name))

        model_evaluator = Evaluator(model, dataset_config, eval_config)

        if evaluate_repeatedly:
            model_evaluator.repeated_checkpoint_run()
        else:
            model_evaluator.run_latest_checkpoints()
Example #8
    def test_create_path_drop_masks(self):
        # Tests creating path drop choices
        # based on the given probabilities

        rpn_model = RpnModel(self.model_config,
                             train_val_test="val",
                             dataset=self.dataset)
        rpn_model.build()
        ##################################
        # Test-Case 1 : Keep img, Keep bev
        ##################################
        p_img = tf.constant(0.6)
        p_bev = tf.constant(0.85)

        # Set the random numbers for testing purposes
        rand_choice = [0.53, 0.83, 0.05]
        rand_choice_tensor = tf.convert_to_tensor(rand_choice)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        with self.test_session():
            img_mask_out = img_mask.eval()
            bev_mask_out = bev_mask.eval()
            np.testing.assert_array_equal(img_mask_out, 1.0)
            np.testing.assert_array_equal(bev_mask_out, 1.0)

        ##################################
        # Test-Case 2 : Kill img, Keep bev
        ##################################
        p_img = tf.constant(0.2)
        p_bev = tf.constant(0.85)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        with self.test_session():
            img_mask_out = img_mask.eval()
            bev_mask_out = bev_mask.eval()
            np.testing.assert_array_equal(img_mask_out, 0.0)
            np.testing.assert_array_equal(bev_mask_out, 1.0)

        ##################################
        # Test-Case 3 : Keep img, Kill bev
        ##################################
        p_img = tf.constant(0.9)
        p_bev = tf.constant(0.1)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        with self.test_session():
            img_mask_out = img_mask.eval()
            bev_mask_out = bev_mask.eval()
            np.testing.assert_array_equal(img_mask_out, 1.0)
            np.testing.assert_array_equal(bev_mask_out, 0.0)

        ##############################################
        # Test-Case 4 : Kill img, Kill bev, third flip
        ##############################################
        p_img = tf.constant(0.0)
        p_bev = tf.constant(0.1)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        with self.test_session():
            img_mask_out = img_mask.eval()
            bev_mask_out = bev_mask.eval()
            np.testing.assert_array_equal(img_mask_out, 0.0)
            # Because of the third condition, we expect to be keeping bev
            np.testing.assert_array_equal(bev_mask_out, 1.0)

        ##############################################
        # Test-Case 5 : Kill img, Kill bev, third flip
        ##############################################
        # Let's flip the third chance and keep img instead
        rand_choice = [0.53, 0.83, 0.61]
        rand_choice_tensor = tf.convert_to_tensor(rand_choice)
        p_img = tf.constant(0.0)
        p_bev = tf.constant(0.1)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        with self.test_session():
            img_mask_out = img_mask.eval()
            bev_mask_out = bev_mask.eval()
            # Because of the third condition, we expect to be keeping img
            np.testing.assert_array_equal(img_mask_out, 1.0)
            np.testing.assert_array_equal(bev_mask_out, 0.0)
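
The decision rule these five cases pin down can be summarized in a plain-Python sketch (inferred from the tests above, not the actual create_path_drop_masks implementation; in particular the 0.5 threshold on the third draw is an assumption consistent with Test-Cases 4 and 5):

def path_drop_masks_sketch(p_img, p_bev, rand_choice):
    # Keep a path when its random draw falls below its keep probability
    img_mask = 1.0 if rand_choice[0] < p_img else 0.0
    bev_mask = 1.0 if rand_choice[1] < p_bev else 0.0
    # If both paths would be dropped, the third draw keeps exactly one
    if img_mask == 0.0 and bev_mask == 0.0:
        if rand_choice[2] < 0.5:
            bev_mask = 1.0
        else:
            img_mask = 1.0
    return img_mask, bev_mask

# Consistent with Test-Cases 4 and 5 above
assert path_drop_masks_sketch(0.0, 0.1, [0.53, 0.83, 0.05]) == (0.0, 1.0)
assert path_drop_masks_sketch(0.0, 0.1, [0.53, 0.83, 0.61]) == (1.0, 0.0)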
Example #9
    def test_path_drop_input_multiplication(self):
        # Tests the result of final image/bev inputs
        # based on the path drop decisions

        rpn_model = RpnModel(self.model_config,
                             train_val_test="val",
                             dataset=self.dataset)
        rpn_model.build()
        # Shape of input feature map
        dummy_img_feature_shape = [1, 30, 50, 2]
        random_values = np.random.randint(low=1, high=256,
                                          size=2).astype(np.float32)

        dummy_img_feature_map = tf.fill(dummy_img_feature_shape,
                                        random_values[0])
        # Assume both feature maps are the same size; this is not
        # the case inside the network
        dummy_bev_feature_map = tf.fill(dummy_img_feature_shape,
                                        random_values[1])

        ##################################
        # Test-Case 1 : Keep img, Kill bev
        ##################################
        exp_img_input = np.full(dummy_img_feature_shape, random_values[0])
        exp_bev_input = np.full(dummy_img_feature_shape, 0.0)

        p_img = tf.constant(0.6)
        p_bev = tf.constant(0.4)

        # Set the random numbers for testing purposes
        rand_choice = [0.53, 0.83, 0.05]
        rand_choice_tensor = tf.convert_to_tensor(rand_choice)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        final_img_input = tf.multiply(dummy_img_feature_map, img_mask)

        final_bev_input = tf.multiply(dummy_bev_feature_map, bev_mask)

        with self.test_session():
            final_img_input_out = final_img_input.eval()
            final_bev_input_out = final_bev_input.eval()
            np.testing.assert_array_equal(final_img_input_out, exp_img_input)
            np.testing.assert_array_equal(final_bev_input_out, exp_bev_input)

        ##################################
        # Test-Case 2 : Kill img, Keep bev
        ##################################
        exp_img_input = np.full(dummy_img_feature_shape, 0)
        exp_bev_input = np.full(dummy_img_feature_shape, random_values[1])

        p_img = tf.constant(0.4)
        p_bev = tf.constant(0.9)

        img_mask, bev_mask = rpn_model.create_path_drop_masks(
            p_img, p_bev, rand_choice_tensor)

        final_img_input = tf.multiply(dummy_img_feature_map, img_mask)

        final_bev_input = tf.multiply(dummy_bev_feature_map, bev_mask)

        with self.test_session():
            final_img_input_out = final_img_input.eval()
            final_bev_input_out = final_bev_input.eval()
            np.testing.assert_array_equal(final_img_input_out, exp_img_input)
            np.testing.assert_array_equal(final_bev_input_out, exp_bev_input)
Example #10
    def test_load_model_weights(self):
        # Tests loading weights

        train_val_test = 'train'

        # Overwrite the training iterations
        self.train_config.max_iterations = 1
        self.train_config.overwrite_checkpoints = True

        with tf.Graph().as_default():
            model = RpnModel(self.model_config,
                             train_val_test=train_val_test,
                             dataset=self.dataset)
            trainer.train(model, self.train_config)

            paths_config = self.model_config.paths_config
            rpn_checkpoint_dir = paths_config.checkpoint_dir

            # load the weights back in
            init_op = tf.global_variables_initializer()

            saver = tf.train.Saver()
            with tf.Session() as sess:
                sess.run(init_op)

                trainer_utils.load_checkpoints(rpn_checkpoint_dir, saver)
                checkpoint_to_restore = saver.last_checkpoints[-1]
                trainer_utils.load_model_weights(sess, checkpoint_to_restore)

                rpn_vars = slim.get_model_variables()
                rpn_weights = sess.run(rpn_vars)
                self.assertGreater(len(rpn_weights),
                                   0,
                                   msg='Loaded RPN weights are empty')

        with tf.Graph().as_default():
            model = AvodModel(self.model_config,
                              train_val_test=train_val_test,
                              dataset=self.dataset)
            model.build()

            # load the weights back in
            init_op = tf.global_variables_initializer()

            saver = tf.train.Saver()
            with tf.Session() as sess:
                sess.run(init_op)

                trainer_utils.load_checkpoints(rpn_checkpoint_dir, saver)
                checkpoint_to_restore = saver.last_checkpoints[-1]
                trainer_utils.load_model_weights(sess, checkpoint_to_restore)

                avod_vars = slim.get_model_variables()
                avod_weights = sess.run(avod_vars)

                # AVOD weights should include both RPN + AVOD weights
                self.assertGreater(len(avod_weights),
                                   len(rpn_weights),
                                   msg='Expected more weights for AVOD')

                # grab weights corresponding to RPN by index
                # since the model variables are ordered
                rpn_len = len(rpn_weights)
                loaded_rpn_vars = avod_vars[0:rpn_len]
                rpn_weights_reload = sess.run(loaded_rpn_vars)

                # Make sure the reloaded weights match the originally
                # loaded weights
                for i in range(rpn_len):
                    np.testing.assert_array_equal(rpn_weights_reload[i],
                                                  rpn_weights[i])