Example #1
    def predict_for_evalai(self, dataset_type):
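        """Run inference on the test set(s) and collect predictions for an EvalAI submission.

        Relies on torch, tqdm and the framework's Report class being importable in this module.
        """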
        reporter = self.dataset_loader.get_test_reporter(dataset_type)
        with torch.no_grad():
            self.model.eval()
            message = "Starting {} inference for evalai".format(dataset_type)
            self.writer.write(message)

            while reporter.next_dataset():
                dataloader = reporter.get_dataloader()

                for batch in tqdm(dataloader):
                    prepared_batch = reporter.prepare_batch(batch)
                    model_output = self.model(prepared_batch)
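                    # Merge the model's outputs into the prepared batch so the visualization step sees inputs and predictions together.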
                    prepared_batch.update(model_output)
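                    # NOTE: visualization_flag and val_predictions are not defined in this method; they are assumed to be set elsewhere (e.g. as globals or attributes) by the surrounding code.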
                    if visualization_flag:
                        self._summarize_visualization(prepared_batch)
                    report = Report(prepared_batch, model_output)
                    reporter.add_to_report(report)

            self.writer.write("Finished predicting")
            if visualization_flag:
                # with open('textcap_visualization.json', 'w+') as f:
                #     json.dump(val_predictions, f)
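                # NOTE: torch.save writes a pickled file even though the filename ends in .json; the json.dump version above was left commented out.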
                torch.save(val_predictions, 'textcap_visualization.json')
                print('Successfully saved the visualization file')
            self.model.train()
Example #2
    def _forward_pass(self, batch):
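        """Prepare the batch, run the model, and wrap inputs and outputs in a Report."""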
        prepared_batch = self.dataset_loader.prepare_batch(batch)
        self.profile("Batch prepare time")
        # Arguments should be a dict at this point
        model_output = self.model(prepared_batch)
        report = Report(prepared_batch, model_output)
        self.profile("Forward time")

        return report
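A minimal usage sketch (not one of the listed examples): one way a training step might consume the Report returned by _forward_pass. The names self.optimizer and report.losses are assumptions, following the convention that the model output carries a "losses" dict of loss tensors.

    def _training_step(self, batch):
        # Hypothetical consumer of _forward_pass; self.optimizer and report.losses
        # are assumed attributes, not taken from the examples above.
        report = self._forward_pass(batch)
        # Sum the individual loss terms into a single scalar before backpropagation.
        loss = sum(value.mean() for value in report.losses.values())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return report, loss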
Example #3
    def _forward_pass(self, batch):
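        # Same as Example #2, except batches are prepared by self.task_loader instead of self.dataset_loader.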
        prepared_batch = self.task_loader.prepare_batch(batch)
        self.profile("Batch prepare time")
        model_output = self.model(prepared_batch)
        report = Report(prepared_batch, model_output)
        self.profile("Forward time")

        return report
Example #4
    def _forward_pass(self, batch):
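        """Run the forward pass and additionally return the model's attention weights ("att") when present."""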
        prepared_batch = self.task_loader.prepare_batch(batch)
        self.profile("Batch prepare time")

        # Arguments should be a dict at this point
        model_output = self.model(prepared_batch)  # a dict of losses, metrics, scores, and att
        report = Report(prepared_batch, model_output)
        self.profile("Forward time")

        return report, model_output.get("att")  # attention weights, or None if the model does not return "att"
Example #5
    def _forward_pass(self, batch):
        prepared_batch = self.task_loader.prepare_batch(batch)
        self.profile("Batch prepare time")

        # Arguments should be a dict at this point
        model_output = self.model(prepared_batch)
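        # Debug output: print the captions generated by the model for this batch.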
        print('model_output["captions"]', model_output['captions'])
        report = Report(prepared_batch, model_output)
        self.profile("Forward time")

        return report
Example #6
    def predict_for_evalai(self, dataset_type):
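        """Run inference for an EvalAI submission; the same loop as Example #1 without the visualization bookkeeping."""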
        reporter = self.task_loader.get_test_reporter(dataset_type)
        with torch.no_grad():
            self.model.eval()
            message = "Starting {} inference for evalai".format(dataset_type)
            self.writer.write(message)

            while reporter.next_dataset():
                dataloader = reporter.get_dataloader()

                for batch in tqdm(dataloader):
                    prepared_batch = reporter.prepare_batch(batch)
                    model_output = self.model(prepared_batch)
                    report = Report(prepared_batch, model_output)
                    reporter.add_to_report(report)

            self.writer.write("Finished predicting")
            self.model.train()