Example #1
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}  # Keras passes logs=None; avoid a mutable default argument
        self.losses.append(logs.get('loss'))

        evaluation_parameter = predict(self.conf, self.concept, self.positives,
                                       self.vocab, self.entity_model,
                                       self.concept_model, self.model,
                                       self.val_data)
        self.accuracy.append(evaluation_parameter)

        with open(self.history, 'a', encoding='utf-8') as f:
            f.write(
                'Epoch: {0}, Training loss: {1}, validation accuracy: {2}\n'.
                format(epoch, logs.get('loss'), evaluation_parameter))

        if evaluation_parameter > self.best:
            logger.info('Intermediate model saved.')
            self.best = evaluation_parameter
            self.model.save(self.model_path)
            self.wait = 0
            # something here to print trec_eval doc
        else:
            self.wait += 1
            if self.wait > int(self.conf['training']['patience']):
                self.stopped_epoch = epoch
                self.model.stop_training = True
        if self.save and self.model.stop_training:
            logger.info('Saving predictions to {0}'.format(
                self.conf['model']['path_saved_predictions']))
            # test_y is not defined in this variant; assuming the validation
            # predictions are meant, as in Example #2 below.
            test_y = self.model.predict(self.val_data.x)
            model_tools.save_predictions(
                self.conf['model']['path_saved_predictions'],
                test_y)  # (filename, predictions)
        logger.info(
            'Testing: epoch: {0}, self.model.stop_training: {1}'.format(
                epoch, self.model.stop_training))
        return
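Both examples rely on callback state that is initialized outside the snippet (self.losses, self.accuracy, self.best, self.wait, self.history, self.model_path, self.conf, self.val_data, self.save). A minimal constructor sketch under that assumption, using tf.keras; the class name EvaluationCallback and the parameter list are hypothetical, not from the source:

from tensorflow.keras.callbacks import Callback


class EvaluationCallback(Callback):
    def __init__(self, conf, val_data, history_path, model_path, save=False):
        super().__init__()
        self.conf = conf              # parsed config, e.g. conf['training']['patience']
        self.val_data = val_data      # validation split exposing .x, .y, .mentions
        self.history = history_path   # text file the per-epoch summary is appended to
        self.model_path = model_path  # where the best intermediate model is saved
        self.save = save              # whether to dump predictions once training stops
        self.losses = []              # training loss per epoch
        self.accuracy = []            # validation accuracy per epoch
        self.best = 0.0               # best validation accuracy seen so far
        self.wait = 0                 # epochs since the last improvement
        self.stopped_epoch = 0        # epoch at which early stopping triggered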
Example #2
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}  # Keras passes logs=None; avoid a mutable default argument
        self.losses.append(logs.get('loss'))
        #before = datetime.now()
        test_y = self.model.predict(self.val_data.x)
        #after = datetime.now()
        #logger.info('Time taken for prediction without speedup:{0}'.format(after-before))
        evaluation_parameter = evaluate(self.val_data.mentions, test_y,
                                        self.val_data.y)
        self.accuracy.append(evaluation_parameter)
        with open(self.history, 'a', encoding='utf-8') as f:
            f.write(
                'Epoch: {0}, Training loss: {1}, validation accuracy: {2}\n'.
                format(epoch, logs.get('loss'), evaluation_parameter))

        if evaluation_parameter > self.best:
            logger.info('Intermediate model saved.')
            self.best = evaluation_parameter
            self.model.save(self.model_path)
            self.wait = 0
            # something here to print trec_eval doc
        else:
            self.wait += 1
            if self.wait > int(self.conf['training']['patience']):
                self.stopped_epoch = epoch
                self.model.stop_training = True
        if self.save and self.model.stop_training:
            logger.info('Saving predictions to {0}'.format(
                self.conf['model']['path_saved_predictions']))
            model_tools.save_predictions(
                self.conf['model']['path_saved_predictions'],
                test_y)  # (filename, predictions)
        logger.info(
            'Testing: epoch: {0}, self.model.stop_training: {1}'.format(
                epoch + 1, self.model.stop_training))
        return
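A minimal usage sketch (not from the source) of how such a callback is wired into training. Keras assigns callback.model automatically when the callback is passed to fit(), which is what makes self.model.predict, self.model.save and self.model.stop_training work inside on_epoch_end; the training inputs and the epochs config key are assumptions:

callback = EvaluationCallback(conf, val_data,
                              history_path='training_history.log',
                              model_path='best_model.h5',
                              save=True)
model.fit(train_x, train_y,                        # hypothetical training arrays
          epochs=int(conf['training']['epochs']),  # hypothetical config key
          callbacks=[callback])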