Example No. 1
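Both examples below are variants of the `run_checkpoint_once` method from the AVOD 3D object detector's evaluator. This first one restores a checkpoint into the live session, loops over one epoch of the chosen data split, saves RPN proposals (plus final AVOD predictions and, for corner representations, box-corner outputs when the full model is run), accumulates validation losses, and optionally kicks off the KITTI native evaluation.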
    def run_checkpoint_once(self, checkpoint_to_restore):
        """Evaluates network metrics once over all the validation samples.

        Args:
            checkpoint_to_restore: Path to the checkpoint to restore.
        """

        self._saver.restore(self._sess, checkpoint_to_restore)

        data_split = self.dataset_config.data_split
        predictions_base_dir = self.paths_config.pred_dir

        num_samples = self.model.dataset.num_samples
        train_val_test = self.model._train_val_test

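        # Losses and summaries are only computed for the 'val' split;
        # 'test' mode runs inference only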
        validation = train_val_test == 'val'

        global_step = trainer_utils.get_global_step(self._sess,
                                                    self.global_step_tensor)

        # Rpn average losses dictionary
        if validation:
            eval_rpn_losses = self._create_rpn_losses_dict()

        # Add folders to save predictions
        prop_score_predictions_dir = predictions_base_dir + \
            "/proposals_and_scores/{}/{}".format(
                data_split, global_step)
        trainer_utils.create_dir(prop_score_predictions_dir)

        if self.full_model:
            # Make sure the box representation is valid
            box_rep = self.model_config.avod_config.avod_box_representation
            if box_rep not in [
                    'box_3d', 'box_8c', 'box_8co', 'box_4c', 'box_4ca'
            ]:
                raise ValueError(
                    'Invalid box representation {}.'.format(box_rep))

            avod_predictions_dir = predictions_base_dir + \
                "/final_predictions_and_scores/{}/{}".format(
                    data_split, global_step)
            trainer_utils.create_dir(avod_predictions_dir)

            if box_rep in ['box_8c', 'box_8co', 'box_4c', 'box_4ca']:
                avod_box_corners_dir = predictions_base_dir + \
                    "/final_boxes_{}_and_scores/{}/{}".format(
                        box_rep, data_split, global_step)
                trainer_utils.create_dir(avod_box_corners_dir)

            # Avod average losses dictionary
            eval_avod_losses = self._create_avod_losses_dict()

        num_valid_samples = 0

        # Keep track of feed_dict and inference time
        total_feed_dict_time = []
        total_inference_time = []

        # Run through a single epoch
        current_epoch = self.model.dataset.epochs_completed
        while current_epoch == self.model.dataset.epochs_completed:

            # Keep track of feed_dict speed
            start_time = time.time()
            feed_dict = self.model.create_feed_dict()
            feed_dict_time = time.time() - start_time

            # Get sample name from model
            sample_name = self.model.sample_info['sample_name']

            # File paths for saving proposals and predictions
            rpn_file_path = prop_score_predictions_dir + "/{}.txt".format(
                sample_name)

            if self.full_model:
                avod_file_path = avod_predictions_dir + \
                    "/{}.txt".format(sample_name)

                if box_rep in ['box_8c', 'box_8co', 'box_4c', 'box_4ca']:
                    avod_box_corners_file_path = avod_box_corners_dir + \
                        '/{}.txt'.format(sample_name)

            num_valid_samples += 1
            print("Step {}: {} / {}, Inference on sample {}".format(
                global_step, num_valid_samples, num_samples, sample_name))

            # Do predictions, loss calculations, and summaries
            if validation:
                if self.summary_merged is not None:
                    predictions, eval_losses, eval_total_loss, summary_out = \
                        self._sess.run([self._prediction_dict,
                                        self._loss_dict,
                                        self._total_loss,
                                        self.summary_merged],
                                       feed_dict=feed_dict)
                    self.summary_writer.add_summary(summary_out, global_step)

                else:
                    predictions, eval_losses, eval_total_loss = \
                        self._sess.run([self._prediction_dict,
                                        self._loss_dict,
                                        self._total_loss],
                                       feed_dict=feed_dict)

                rpn_objectness_loss = eval_losses[RpnModel.LOSS_RPN_OBJECTNESS]
                rpn_regression_loss = eval_losses[RpnModel.LOSS_RPN_REGRESSION]

                self._update_rpn_losses(eval_rpn_losses, rpn_objectness_loss,
                                        rpn_regression_loss, eval_total_loss,
                                        global_step)

                # Save proposals
                proposals_and_scores = \
                    self.get_rpn_proposals_and_scores(predictions)
                np.savetxt(rpn_file_path, proposals_and_scores, fmt='%.3f')

                if self.full_model:
                    # Save predictions (box_rep and avod_file_path only
                    # exist when running the full AVOD model)
                    predictions_and_scores = \
                        self.get_avod_predicted_boxes_3d_and_scores(
                            predictions, box_rep)
                    np.savetxt(avod_file_path, predictions_and_scores,
                               fmt='%.5f')

                    if box_rep in ['box_3d', 'box_4ca']:
                        self._update_avod_box_cls_loc_orient_losses(
                            eval_avod_losses, eval_losses, eval_total_loss,
                            global_step)

                    elif box_rep in ['box_8c', 'box_8co', 'box_4c']:
                        self._update_avod_box_cls_loc_losses(
                            eval_avod_losses, eval_losses, eval_total_loss,
                            global_step)

                    if box_rep != 'box_3d':
                        # Save box corners for all box reps
                        # except for box_3d which is not a corner rep
                        predicted_box_corners_and_scores = \
                            self.get_avod_predicted_box_corners_and_scores(
                                predictions, box_rep)
                        np.savetxt(avod_box_corners_file_path,
                                   predicted_box_corners_and_scores,
                                   fmt='%.5f')

                # Calculate accuracies (eval_avod_losses only exists
                # when running the full AVOD model)
                self.get_cls_accuracy(
                    predictions,
                    eval_avod_losses if self.full_model else None,
                    eval_rpn_losses, global_step)
                print("Step {}: Total time {} s".format(
                    global_step,
                    time.time() - start_time))

            else:
                # Test mode --> train_val_test == 'test'
                inference_start_time = time.time()
                # Don't calculate loss or run summaries for test
                predictions = self._sess.run(self._prediction_dict,
                                             feed_dict=feed_dict)
                inference_time = time.time() - inference_start_time

                # Add times to list
                total_feed_dict_time.append(feed_dict_time)
                total_inference_time.append(inference_time)

                proposals_and_scores = \
                    self.get_rpn_proposals_and_scores(predictions)
                np.savetxt(rpn_file_path, proposals_and_scores, fmt='%.3f')

                if self.full_model:
                    predictions_and_scores = \
                        self.get_avod_predicted_boxes_3d_and_scores(
                            predictions, box_rep)
                    np.savetxt(avod_file_path, predictions_and_scores,
                               fmt='%.5f')

        # end while current_epoch == model.dataset.epochs_completed:

        if validation:
            self.save_proposal_losses_results(eval_rpn_losses,
                                              num_valid_samples, global_step,
                                              predictions_base_dir)
            if self.full_model:
                self.save_prediction_losses_results(eval_avod_losses,
                                                    num_valid_samples,
                                                    global_step,
                                                    predictions_base_dir,
                                                    box_rep=box_rep)

                # Run the KITTI native evaluation, only during validation
                # and when running the full AVOD model; predictions are
                # stored in KITTI format
                if self.do_kitti_native_eval:
                    self.run_kitti_native_eval(global_step)

        else:
            # Test mode --> train_val_test == 'test'
            evaluator_utils.print_inference_time_statistics(
                total_feed_dict_time, total_inference_time)

        print("Step {}: Finished evaluation, results saved to {}".format(
            global_step, prop_score_predictions_dir))
Example No. 2
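This second variant is a trimmed-down fork of the same method: it keeps one combined losses dictionary, writes only final predictions, reads each sample's KITTI stereo calibration so boxes can be scored against the camera P2 matrix, and leaves the KITTI native evaluation disabled.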
    def run_checkpoint_once(self, checkpoint_to_restore):
        """Evaluates network metrics once over all the validation samples.

        Args:
            checkpoint_to_restore: Path to the checkpoint to restore.
        """

        self._saver.restore(self._sess, checkpoint_to_restore)

        data_split = self.dataset_config.data_split
        predictions_base_dir = self.paths_config.pred_dir

        num_samples = self.model.dataset.num_samples
        train_val_test = self.model._train_val_test
        print('model: train_val_test:', train_val_test)

        validation = train_val_test == 'val'

        global_step = trainer_utils.get_global_step(self._sess,
                                                    self.global_step_tensor)

        # Combined average losses dictionary
        if validation:
            sum_losses = self._create_losses_dict()

        # Add folder to save predictions
        predictions_dir = predictions_base_dir + \
            "/final_predictions_and_scores/{}/{}".format(
                data_split, global_step)
        trainer_utils.create_dir(predictions_dir)

        num_valid_samples = 0

        # Keep track of feed_dict and inference time
        total_feed_dict_time = []
        total_inference_time = []

        # Run through a single epoch
        current_epoch = self.model.dataset.epochs_completed

        while current_epoch == self.model.dataset.epochs_completed:
            # Keep track of feed_dict speed
            start_time = time.time()
            feed_dict = self.model.create_feed_dict()
            feed_dict_time = time.time() - start_time

            # Get sample name from model
            sample_name = self.model.sample_info['sample_name']
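            # Read the KITTI calibration for this sample; p2 is the
            # left color camera's 3x4 projection matrix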
            stereo_calib = calib_utils.read_calibration(
                self.model.dataset.calib_dir, int(sample_name))
            stereo_calib_p2 = stereo_calib.p2

            output_file_path = predictions_dir + \
                "/{}.txt".format(sample_name)

            num_valid_samples += 1
            print("Step {}: {} / {}, Inference on sample {}".format(
                global_step, num_valid_samples, num_samples, sample_name))

            # Do predictions, loss calculations, and summaries

            if validation:
                if self.summary_merged is not None:
                    predictions, eval_losses, eval_total_loss, summary_out = \
                        self._sess.run([self._prediction_dict,
                                        self._loss_dict,
                                        self._total_loss,
                                        self.summary_merged],
                                       feed_dict=feed_dict)

                    # With a two-sample run, send the summary for the
                    # second sample to the second writer
                    if num_valid_samples == 2 and num_samples == 2:
                        self.summary_writer2.add_summary(
                            summary_out, global_step)
                    else:
                        self.summary_writer.add_summary(
                            summary_out, global_step)

                else:
                    print('Starting inference without summaries')
                    predictions, eval_losses, eval_total_loss = \
                        self._sess.run([self._prediction_dict,
                                        self._loss_dict,
                                        self._total_loss],
                                       feed_dict=feed_dict)

                self._update_losses(eval_losses, eval_total_loss, sum_losses,
                                    global_step)
                # Save predictions
                print('Saving predictions')
                predictions_and_scores = \
                    self.get_predicted_boxes_3d_and_scores(predictions,
                                                           stereo_calib_p2)
                np.savetxt(output_file_path,
                           predictions_and_scores,
                           fmt='%.5f')

                # Calculate accuracies
                # Note: with a single object class and no background
                # class, this accuracy is not very informative
                self.get_cls_accuracy(predictions, sum_losses, global_step)
                print("Step {}: Total time {} s".format(
                    global_step,
                    time.time() - start_time))

            else:
                # Test mode --> train_val_test == 'test'
                inference_start_time = time.time()
                # Don't calculate loss or run summaries for test
                predictions = self._sess.run(self._prediction_dict,
                                             feed_dict=feed_dict)
                inference_time = time.time() - inference_start_time

                # Add times to list
                total_feed_dict_time.append(feed_dict_time)
                total_inference_time.append(inference_time)

                predictions_and_scores = \
                    self.get_predicted_boxes_3d_and_scores(predictions,
                                                           stereo_calib_p2)
                np.savetxt(output_file_path, predictions_and_scores,
                           fmt='%.5f')

        # end while current_epoch == model.dataset.epochs_completed:

        if validation:
            self.save_prediction_losses_results(sum_losses,
                                                num_valid_samples,
                                                global_step,
                                                predictions_base_dir)

            # KITTI native evaluation would run here (validation only,
            # storing predictions in KITTI format), but it is disabled
            # in this variant
            if self.do_kitti_native_eval:
                pass
                # self.run_kitti_native_eval(global_step)

        else:
            # Test mode --> train_val_test == 'test'
            evaluator_utils.print_inference_time_statistics(
                total_feed_dict_time, total_inference_time)

        print("Step {}: Finished evaluation, results saved to {}".format(
            global_step, predictions_dir))
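Example No. 2 threads the camera projection matrix `stereo_calib.p2` into `get_predicted_boxes_3d_and_scores`. For reference, this is the standard KITTI projection such a helper builds on; a minimal sketch, where the function name `project_to_image` is ours:

    import numpy as np

    def project_to_image(points_3d, p2):
        # Project N x 3 camera-frame points through a KITTI 3 x 4 P2
        # matrix (left color camera) and apply the perspective divide
        points_hom = np.hstack(
            [points_3d, np.ones((points_3d.shape[0], 1))])
        projected = points_hom.dot(p2.T)  # (N, 3) homogeneous coords
        return projected[:, :2] / projected[:, 2:3]  # (N, 2) pixels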