Example #1
    def __init__(self, model_path, gpu_id=None, random_seed=None):
        torch_setup(gpus=[gpu_id] if gpu_id is not None else [],
                    random_seed=random_seed)

        self._logger = logging.getLogger('nmmt.NMTDecoder')
        self._engines, self._engines_checkpoint = {}, {}
        self._cold_engines, self._warm_engines, self._hot_engines = [], [], []

        # create a TextProcessor and an NMTEngine for each entry in model.conf and store them in their maps
        settings = ConfigParser.ConfigParser()
        settings.read(os.path.join(model_path, 'model.conf'))

        self._cold_size = self._get_int(settings, 'settings', 'cold_size',
                                        1000)
        self._warm_size = self._get_int(settings, 'settings', 'warm_size', 5)
        self._hot_size = self._get_int(settings, 'settings', 'hot_size', 2)

        if self._cold_size < 1:
            raise ValueError("Cold size must be larger than 0!")

        if self._warm_size < 1:
            raise ValueError("Warm size must be larger than 0!")

        if self._hot_size < 1:
            raise ValueError("Hot size must be larger than 0!")

        self._logger.debug("Model sizes: hot:%d warm:%d cold:%d" %
                           (self._hot_size, self._warm_size, self._cold_size))

        if not settings.has_section('models'):
            raise Exception('no model specified in %s' %
                            os.path.join(model_path, 'model.conf'))

        for key, model_name in settings.items('models'):
            model_file = os.path.join(model_path, model_name)

            # the running state of each engine depends on its position in the configuration file:
            # the higher in the list, the "hotter" its state
            with log_timed_action(self._logger,
                                  'Loading "%s" model from checkpoint' % key):
                self._engines[key] = NMTEngine.load_from_checkpoint(model_file)

                if len(self._hot_engines) < self._hot_size:
                    # the engine is automatically created in COLD state
                    # and now it is upgraded to HOT
                    self._engines[key].running_state = NMTEngine.HOT
                    self._hot_engines.append(key)
                elif len(self._warm_engines) < self._warm_size:
                    # the engine is automatically created in COLD state
                    # and now it is upgraded to WARM
                    self._engines[key].running_state = NMTEngine.WARM
                    self._warm_engines.append(key)
                else:
                    self._cold_engines.append(key)

        self._logger.debug(
            "Running states of the models: hot:%s, warm:%s, cold:%s" %
            (self._hot_engines, self._warm_engines, self._cold_engines))
        # Publicly editable options
        self.beam_size = 5
        self.max_sent_length = 160
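
A minimal usage sketch for this constructor. The directory layout and values below are hypothetical; only the model.conf section names ([settings], [models]) and the constructor signature come from the code above.

# Assumed contents of /path/to/models (hypothetical):
#   model.conf -- [settings] with cold_size/warm_size/hot_size,
#                 [models] with one 'key = checkpoint_file' entry per engine
#   en__it.pt  -- checkpoint referenced by the [models] section
decoder = NMTDecoder('/path/to/models', gpu_id=0, random_seed=3435)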
Example #2
    def tune(self, suggestions, epochs=None, learning_rate=None):
        # Set tuning parameters
        if epochs is None or learning_rate is None:
            _epochs, _learning_rate = self._estimate_tuning_parameters(
                suggestions)

            epochs = epochs if epochs is not None else _epochs
            learning_rate = learning_rate if learning_rate is not None else _learning_rate

        if learning_rate > 0. and epochs > 0:
            if self._tuner is None:
                from nmmt.NMTEngineTrainer import NMTEngineTrainer

                optimizer = Optim(
                    self.metadata.tuning_optimizer,
                    1.,
                    max_grad_norm=self.metadata.tuning_max_grad_norm)

                tuner_opts = NMTEngineTrainer.Options()
                tuner_opts.log_level = logging.NOTSET

                self._tuner = NMTEngineTrainer(self,
                                               options=tuner_opts,
                                               optimizer=optimizer)

            self._tuner.opts.step_limit = epochs
            self._tuner.reset_learning_rate(learning_rate)

            # Process suggestions
            tuning_src_batch, tuning_trg_batch = [], []

            for suggestion in suggestions:
                source = self.processor.encode_line(suggestion.source,
                                                    is_source=True)
                source = self.src_dict.convertToIdxTensor(
                    source, Constants.UNK_WORD)

                target = self.processor.encode_line(suggestion.target,
                                                    is_source=False)
                target = self.trg_dict.convertToIdxTensor(
                    target, Constants.UNK_WORD, Constants.BOS_WORD,
                    Constants.EOS_WORD)

                tuning_src_batch.append(source)
                tuning_trg_batch.append(target)

            tuning_set = Dataset(tuning_src_batch, tuning_trg_batch,
                                 len(tuning_src_batch), torch_is_using_cuda())
            tuning_set = DatasetWrapper(tuning_set)

            # Run tuning
            log_message = 'Tuning on %d suggestions (epochs = %d, learning_rate = %.3f)' % (
                len(suggestions), self._tuner.opts.step_limit,
                self._tuner.optimizer.lr)
            with log_timed_action(self._logger, log_message, log_start=False):
                self._tuner.train_model(tuning_set)
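
A hedged usage sketch for tune(). The Suggestion holder below is a hypothetical stand-in for whatever object exposes the source and target attributes read in the loop above; the import path, checkpoint path, and texts are illustrative assumptions.

import collections

from nmmt import NMTEngine  # import path is an assumption

# Hypothetical container exposing the two attributes tune() reads
Suggestion = collections.namedtuple('Suggestion', ['source', 'target'])

engine = NMTEngine.load_from_checkpoint('/path/to/model.pt')  # as in Example #1
suggestions = [Suggestion('hello world', 'ciao mondo')]

# Let the engine estimate epochs and learning rate from the suggestions...
engine.tune(suggestions)
# ...or pin one or both explicitly
engine.tune(suggestions, epochs=3, learning_rate=0.1)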
Example #3
    def translate(self, text, suggestions=None, n_best=1):
        # (1) Process text and suggestions
        processed_text = self._text_processor.encode_line(text, is_source=True)
        processed_suggestions = None

        if self.tuning_epochs > 0 and suggestions is not None and len(suggestions) > 0:
            processed_suggestions = [], []

            for suggestion in suggestions:
                processed_suggestions[0].append(
                    self._text_processor.encode_line(suggestion.source,
                                                     is_source=True))
                processed_suggestions[1].append(
                    self._text_processor.encode_line(suggestion.target,
                                                     is_source=False))

        # (2) Tune engine if suggestions provided
        if processed_suggestions is not None:
            msg = 'Tuning engine on %d suggestions (%d epochs)' % (
                len(processed_suggestions[0]), self.tuning_epochs)

            with log_timed_action(self._logger, msg, log_start=False):
                self._engine.tune(*processed_suggestions,
                                  epochs=self.tuning_epochs)

        # (3) Translate
        pred_batch, pred_score = self._engine.translate(
            processed_text,
            n_best=n_best,
            beam_size=self.beam_size,
            max_sent_length=self.max_sent_length,
            replace_unk=self.replace_unk)

        # (4) Reset engine if needed
        if processed_suggestions is not None:
            with log_timed_action(self._logger,
                                  'Restoring model initial state',
                                  log_start=False):
                self._engine.reset_model()

        return self._text_processor.decode_tokens(pred_batch[0])
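
A usage sketch for the tune/translate/reset flow above, assuming decoder and suggestions are built as in the previous examples; the input text is illustrative.

# Plain translation, no adaptation
print(decoder.translate('hello world'))

# Adaptive translation: the decoder tunes on the suggestions, translates,
# then restores the initial model state before returning
print(decoder.translate('hello world', suggestions=suggestions, n_best=1))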
Example #4
    def reset_model(self):
        with log_timed_action(self._logger,
                              'Restoring model initial state',
                              log_start=False):
            self.model.load_state_dict(self._model_init_state)

            # disable dropout: the restored model is used for inference only
            self.model.encoder.rnn.dropout = 0.
            self.model.decoder.dropout = nn.Dropout(0.)
            self.model.decoder.rnn.dropout = nn.Dropout(0.)

            self._model_loaded = True
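
A sketch of the counterpart step this method relies on: somewhere at load time, before any tuning, the engine must snapshot its pristine weights so reset_model() can roll back after each adaptive request. The attribute name comes from the code above; the snapshot technique shown (cloning each tensor of the state dict) is an assumption.

# Hypothetical fragment of the engine's loader
self._model_init_state = {k: v.clone() for k, v in self.model.state_dict().items()}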
Example #5
    def get_engine(self, source_lang, target_lang, variant=None):
        key = source_lang + '__' + target_lang

        if variant is not None:
            variant_key = key + '__' + variant

            if variant_key in self._engines:
                key = variant_key
            else:
                self._logger.warning(
                    'Variant "%s" not found, falling back to "%s"' %
                    (variant_key, key))

        key = key.lower()  # ConfigParser lowercases option names, so engine keys are stored lowercase

        if key not in self._engines:
            return None

        engine = self._engines[key]

        if engine.running_state != NMTEngine.HOT:  # the engine is WARM or COLD; upgrade it to HOT
            with log_timed_action(self._logger, 'Upgrading "%s" model' % key):
                if engine.running_state == NMTEngine.WARM:  # the running state of the required engine is WARM
                    self._warm_engines.remove(key)
                else:  # the running state of the required engine is COLD
                    self._cold_engines.remove(key)

                if len(self._hot_engines) >= self._hot_size:  # no more space among the hot engines
                    if len(self._warm_engines) >= self._warm_size:  # no more space among the warm engines
                        # move the last warm engine to cold
                        tmp_key = self._warm_engines.pop()
                        self._engines[tmp_key].running_state = NMTEngine.COLD
                        self._cold_engines.insert(0, tmp_key)

                    # move the last hot engine to warm, which now has at least one free slot
                    tmp_key = self._hot_engines.pop()
                    self._engines[tmp_key].running_state = NMTEngine.WARM
                    self._warm_engines.insert(0, tmp_key)

                # insert the required engine in the first position of the hot models
                engine.running_state = NMTEngine.HOT
                self._hot_engines.insert(0, key)

        self._logger.debug(
            "Running states of the models: hot:%s, warm:%s, cold:%s" %
            (self._hot_engines, self._warm_engines, self._cold_engines))

        return engine
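
A usage sketch for the promotion logic above, assuming decoder was built as in Example #1; the language codes and variant name are illustrative.

# The first request for a pair promotes its engine to HOT, demoting the
# last hot engine to WARM (and the last warm one to COLD) if the tiers are full
engine = decoder.get_engine('en', 'it')

# An unknown variant falls back to the base 'en__it' engine with a warning
engine = decoder.get_engine('en', 'it', variant='legal')

if engine is None:
    pass  # language pair not configured in model.conf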
Example #6
    def __init__(self, model_path, gpu_id=None, random_seed=None):
        self._logger = logging.getLogger('nmmt.NMTDecoder')

        if gpu_id is not None:
            torch.cuda.set_device(gpu_id)

        if random_seed is not None:
            torch.manual_seed(random_seed)
            torch.cuda.manual_seed_all(random_seed)  # also seed all CUDA devices

        using_cuda = gpu_id is not None

        self._text_processor = SubwordTextProcessor.load_from_file(
            os.path.join(model_path, 'model.bpe'))
        with log_timed_action(self._logger, 'Loading model from checkpoint'):
            self._engine = NMTEngine.load_from_checkpoint(
                os.path.join(model_path, 'model.pt'), using_cuda=using_cuda)

        # Publicly editable options
        self.beam_size = 5
        self.max_sent_length = 160
        self.replace_unk = False
        self.tuning_epochs = 5
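
A usage sketch for this single-model decoder; the path is hypothetical, and the options set below mirror the publicly editable attributes at the end of the constructor.

decoder = NMTDecoder('/path/to/model_dir', gpu_id=0, random_seed=3435)
decoder.beam_size = 10      # options may be overridden after construction
decoder.replace_unk = True
print(decoder.translate('hello world'))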