def test_disable_path_drop(self):
    # Test that path drop is disabled when the probabilities
    # are set to 1.0.
    train_val_test = 'train'

    # Overwrite path drop probabilities
    model_config = config_builder.proto_to_obj(self.model_config)
    model_config.path_drop_probabilities = [1.0, 1.0]

    with tf.Graph().as_default():
        model = RpnModel(model_config,
                         train_val_test=train_val_test,
                         dataset=self.dataset)
        model.build()

        # These variables are only set when path drop is enabled;
        # with no path drop, they should not exist.
        self.assertFalse(hasattr(model, 'img_path_drop_mask'))
        self.assertFalse(hasattr(model, 'bev_path_drop_mask'))
def test_path_drop_weights(self):
    # Tests the effect of path drop on the network's feature maps.
    # It sets up a minimal training loop to check the feature maps
    # before and after running the 'train_op' while path drop is
    # in effect.
    train_val_test = 'train'

    # Overwrite the training iterations
    self.train_config.max_iterations = 2
    self.train_config.overwrite_checkpoints = True

    # Overwrite path drop probabilities
    model_config = config_builder.proto_to_obj(self.model_config)
    model_config.path_drop_probabilities = [0.0, 0.8]

    with tf.Graph().as_default():
        # Set a graph-level seed
        tf.set_random_seed(1245)
        model = RpnModel(model_config,
                         train_val_test=train_val_test,
                         dataset=self.dataset)
        prediction_dict = model.build()
        losses_dict, total_loss = model.loss(prediction_dict)

        global_summaries = set([])

        # Optimizer
        training_optimizer = optimizer_builder.build(
            self.train_config.optimizer,
            global_summaries)
        train_op = slim.learning.create_train_op(
            total_loss,
            training_optimizer)

        init_op = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init_op)
            for step in range(1, self.train_config.max_iterations):
                feed_dict = model.create_feed_dict()
                if step == 1:
                    current_feature_maps = sess.run(
                        model.img_feature_maps, feed_dict=feed_dict)
                    exp_feature_maps = current_feature_maps
                train_op_loss = sess.run(train_op, feed_dict=feed_dict)
                print('Step {}, Total Loss {:0.3f}'.format(
                    step, train_op_loss))

                updated_feature_maps = sess.run(
                    model.img_feature_maps, feed_dict=feed_dict)

            # The feature maps should have remained the same, since
            # the image path was dropped
            np.testing.assert_array_almost_equal(updated_feature_maps,
                                                 exp_feature_maps,
                                                 decimal=4)
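# A minimal sketch (illustrative only, not part of the model or test code)
# of why the image feature maps stay frozen in the test above: with
# path_drop_probabilities [0.0, 0.8] the image mask is always 0.0, and
# multiplying a branch output by a zero mask zeroes the gradients flowing
# back into that branch, so the optimizer never updates its weights.
def _sketch_zero_mask_blocks_gradients():
    w = tf.Variable(2.0)                  # stand-in image-branch weight
    features = w * tf.constant(3.0)       # stand-in branch output
    masked = features * tf.constant(0.0)  # image path dropped
    grad = tf.gradients(tf.reduce_sum(masked), [w])[0]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(grad)             # evaluates to 0.0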
def test_rpn_loss(self):
    # Use "val" so that the first sample is loaded each time
    rpn_model = RpnModel(self.model_config,
                         train_val_test="val",
                         dataset=self.dataset)

    predictions = rpn_model.build()
    loss, total_loss = rpn_model.loss(predictions)

    feed_dict = rpn_model.create_feed_dict()

    with self.test_session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        loss_dict_out = sess.run(loss, feed_dict=feed_dict)
        print('Losses ', loss_dict_out)
def test_create_path_drop_masks(self):
    # Tests creating path drop choices
    # based on the given probabilities
    rpn_model = RpnModel(self.model_config,
                         train_val_test="val",
                         dataset=self.dataset)
    rpn_model.build()

    ##################################
    # Test Case 1: Keep img, Keep bev
    ##################################
    p_img = tf.constant(0.6)
    p_bev = tf.constant(0.85)

    # Set the random numbers for testing purposes
    rand_choice = [0.53, 0.83, 0.05]
    rand_choice_tensor = tf.convert_to_tensor(rand_choice)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    with self.test_session():
        img_mask_out = img_mask.eval()
        bev_mask_out = bev_mask.eval()
        np.testing.assert_array_equal(img_mask_out, 1.0)
        np.testing.assert_array_equal(bev_mask_out, 1.0)

    ##################################
    # Test Case 2: Kill img, Keep bev
    ##################################
    p_img = tf.constant(0.2)
    p_bev = tf.constant(0.85)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    with self.test_session():
        img_mask_out = img_mask.eval()
        bev_mask_out = bev_mask.eval()
        np.testing.assert_array_equal(img_mask_out, 0.0)
        np.testing.assert_array_equal(bev_mask_out, 1.0)

    ##################################
    # Test Case 3: Keep img, Kill bev
    ##################################
    p_img = tf.constant(0.9)
    p_bev = tf.constant(0.1)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    with self.test_session():
        img_mask_out = img_mask.eval()
        bev_mask_out = bev_mask.eval()
        np.testing.assert_array_equal(img_mask_out, 1.0)
        np.testing.assert_array_equal(bev_mask_out, 0.0)

    ##############################################
    # Test Case 4: Kill img, Kill bev, third flip
    ##############################################
    p_img = tf.constant(0.0)
    p_bev = tf.constant(0.1)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    with self.test_session():
        img_mask_out = img_mask.eval()
        bev_mask_out = bev_mask.eval()
        np.testing.assert_array_equal(img_mask_out, 0.0)
        # Because of the third condition, we expect bev to be kept
        np.testing.assert_array_equal(bev_mask_out, 1.0)

    ##############################################
    # Test Case 5: Kill img, Kill bev, third flip
    ##############################################
    # Flip the third random value so that img is kept instead
    rand_choice = [0.53, 0.83, 0.61]
    rand_choice_tensor = tf.convert_to_tensor(rand_choice)

    p_img = tf.constant(0.0)
    p_bev = tf.constant(0.1)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    with self.test_session():
        img_mask_out = img_mask.eval()
        bev_mask_out = bev_mask.eval()
        # Because of the third condition, we expect img to be kept
        np.testing.assert_array_equal(img_mask_out, 1.0)
        np.testing.assert_array_equal(bev_mask_out, 0.0)
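# A hedged reference reimplementation (reconstructed from the test cases
# above, not taken from the RpnModel source) of the decision logic that
# create_path_drop_masks appears to follow: the first two random draws
# decide each path independently, and the third draw breaks the tie when
# both paths would be killed, so at least one input path always survives.
def _sketch_expected_path_drop(p_img, p_bev, rand_choice):
    keep_img = rand_choice[0] < p_img
    keep_bev = rand_choice[1] < p_bev
    if not (keep_img or keep_bev):
        keep_bev = rand_choice[2] < 0.5   # e.g. 0.05 keeps bev
        keep_img = not keep_bev           # e.g. 0.61 keeps img
    return float(keep_img), float(keep_bev)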
def test_path_drop_input_multiplication(self):
    # Tests the result of the final image/bev inputs
    # based on the path drop decisions
    rpn_model = RpnModel(self.model_config,
                         train_val_test="val",
                         dataset=self.dataset)
    rpn_model.build()

    # Shape of the input feature maps
    dummy_img_feature_shape = [1, 30, 50, 2]
    random_values = np.random.randint(
        low=1, high=256, size=2).astype(np.float32)
    dummy_img_feature_map = tf.fill(dummy_img_feature_shape,
                                    random_values[0])
    # Assume both feature maps are the same size; this is not
    # the case inside the network
    dummy_bev_feature_map = tf.fill(dummy_img_feature_shape,
                                    random_values[1])

    ##################################
    # Test Case 1: Keep img, Kill bev
    ##################################
    exp_img_input = np.full(dummy_img_feature_shape, random_values[0])
    exp_bev_input = np.full(dummy_img_feature_shape, 0.0)

    p_img = tf.constant(0.6)
    p_bev = tf.constant(0.4)

    # Set the random numbers for testing purposes
    rand_choice = [0.53, 0.83, 0.05]
    rand_choice_tensor = tf.convert_to_tensor(rand_choice)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    final_img_input = tf.multiply(dummy_img_feature_map, img_mask)
    final_bev_input = tf.multiply(dummy_bev_feature_map, bev_mask)

    with self.test_session():
        final_img_input_out = final_img_input.eval()
        final_bev_input_out = final_bev_input.eval()
        np.testing.assert_array_equal(final_img_input_out,
                                      exp_img_input)
        np.testing.assert_array_equal(final_bev_input_out,
                                      exp_bev_input)

    ##################################
    # Test Case 2: Kill img, Keep bev
    ##################################
    exp_img_input = np.full(dummy_img_feature_shape, 0.0)
    exp_bev_input = np.full(dummy_img_feature_shape, random_values[1])

    p_img = tf.constant(0.4)
    p_bev = tf.constant(0.9)

    img_mask, bev_mask = rpn_model.create_path_drop_masks(
        p_img, p_bev, rand_choice_tensor)

    final_img_input = tf.multiply(dummy_img_feature_map, img_mask)
    final_bev_input = tf.multiply(dummy_bev_feature_map, bev_mask)

    with self.test_session():
        final_img_input_out = final_img_input.eval()
        final_bev_input_out = final_bev_input.eval()
        np.testing.assert_array_equal(final_img_input_out,
                                      exp_img_input)
        np.testing.assert_array_equal(final_bev_input_out,
                                      exp_bev_input)
def test_load_model_weights(self):
    # Tests loading weights
    train_val_test = 'train'

    # Overwrite the training iterations
    self.train_config.max_iterations = 1
    self.train_config.overwrite_checkpoints = True

    with tf.Graph().as_default():
        model = RpnModel(self.model_config,
                         train_val_test=train_val_test,
                         dataset=self.dataset)
        trainer.train(model, self.train_config)

        paths_config = self.model_config.paths_config
        rpn_checkpoint_dir = paths_config.checkpoint_dir

        # Load the weights back in
        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            trainer_utils.load_checkpoints(rpn_checkpoint_dir, saver)
            checkpoint_to_restore = saver.last_checkpoints[-1]
            trainer_utils.load_model_weights(sess, checkpoint_to_restore)

            rpn_vars = slim.get_model_variables()
            rpn_weights = sess.run(rpn_vars)
            self.assertGreater(len(rpn_weights), 0,
                               msg='Loaded RPN weights are empty')

    with tf.Graph().as_default():
        model = AvodModel(self.model_config,
                          train_val_test=train_val_test,
                          dataset=self.dataset)
        model.build()

        # Load the weights back in
        init_op = tf.global_variables_initializer()
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(init_op)
            trainer_utils.load_checkpoints(rpn_checkpoint_dir, saver)
            checkpoint_to_restore = saver.last_checkpoints[-1]
            trainer_utils.load_model_weights(sess, checkpoint_to_restore)

            avod_vars = slim.get_model_variables()
            avod_weights = sess.run(avod_vars)

            # AVOD weights should include both RPN + AVOD weights
            self.assertGreater(len(avod_weights),
                               len(rpn_weights),
                               msg='Expected more weights for AVOD')

            # Grab the weights corresponding to the RPN by index,
            # since the model variables are ordered
            rpn_len = len(rpn_weights)
            loaded_rpn_vars = avod_vars[0:rpn_len]
            rpn_weights_reload = sess.run(loaded_rpn_vars)

            # Make sure the reloaded weights match the originally
            # loaded weights
            for i in range(rpn_len):
                np.testing.assert_array_equal(rpn_weights_reload[i],
                                              rpn_weights[i])
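# A minimal sketch (an assumed pattern, not necessarily what
# trainer_utils.load_model_weights does internally) of how RPN weights
# can be restored into the larger AVOD graph: build a Saver over only
# the variables present in the checkpoint, so the AVOD-only variables
# keep their initialized values.
def _sketch_partial_restore(sess, checkpoint_path):
    ckpt_vars = tf.train.NewCheckpointReader(
        checkpoint_path).get_variable_to_shape_map()
    shared_vars = [var for var in slim.get_model_variables()
                   if var.op.name in ckpt_vars]
    tf.train.Saver(var_list=shared_vars).restore(sess, checkpoint_path)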