Example #1
    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        # state for prediction caching
        self._predictions = None
        self._cached_predict = False
        self._closed = False
        self._to_pull = 0

        try:
            self.estimator_dir = os.path.abspath(
                os.path.join(self.config.tensorboard_folder,
                             str(int(time.time()))))
            pathlib.Path(self.estimator_dir).mkdir(parents=True, exist_ok=True)
            self._tmp_dir = None
        except (TypeError, IOError):
            # TypeError --> tensorboard_folder is None
            # IOError --> user likely does not have permission to write to the tensorboard_folder directory
            # Both cases can be resolved by falling back to a temporary directory
            self._tmp_dir = tempfile.TemporaryDirectory(prefix="Finetune")
            self.estimator_dir = self._tmp_dir.name
            LOGGER.info("Saving tensorboard output to {}".format(
                self.estimator_dir))

        self.saver = Saver(
            fallback_filename=self.config.base_model_path,
            exclude_matches=None if self.config.save_adam_vars else "Adam",
            variable_transforms=[
                embedding_preprocessor(self.input_pipeline, self.config)
            ],
            save_dtype=self.config.save_dtype,
            target_model_init_from_base_model=self.config.target_model_init_from_base_model)
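
The try/except above leans on os.path.join raising TypeError when tensorboard_folder is None. A minimal standalone sketch of that fallback pattern, under that assumption (resolve_log_dir is illustrative, not library code):

import os
import pathlib
import tempfile
import time

def resolve_log_dir(tensorboard_folder):
    # Fall back to a TemporaryDirectory when the folder is None (TypeError)
    # or cannot be written to (IOError).
    try:
        log_dir = os.path.abspath(
            os.path.join(tensorboard_folder, str(int(time.time()))))
        pathlib.Path(log_dir).mkdir(parents=True, exist_ok=True)
        return log_dir, None
    except (TypeError, IOError):
        tmp = tempfile.TemporaryDirectory(prefix="Finetune")
        # Return the handle as well so the directory is not cleaned up early.
        return tmp.name, tmp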
Example #2
    def load(path, *args, **kwargs):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        :param **kwargs: key-value pairs of config items to override.
        """
        if not isinstance(path, str) and not hasattr(path, "write"):
            instance = path
            raise FinetuneError(
                'The .load() method can only be called on the class, not on an instance. Try `{}.load("{}")` instead.'
                .format(instance.__class__.__name__, args[0]))

        assert_valid_config(**kwargs)

        saver = Saver()
        model = saver.load(path)

        # Backwards compatibility
        # Ensure old models get new default settings
        for setting, default in get_default_config().items():
            if not hasattr(model.config, setting):
                if setting == "add_eos_bos_to_chunk":
                    model.config.add_eos_bos_to_chunk = False
                else:
                    model.config.update({setting: default})

        model.config.update(kwargs)
        model.input_pipeline.config = model.config
        download_data_if_required(model.config.base_model)
        saver.set_fallback(model.config.base_model_path)
        model._initialize()
        model.saver.variables = saver.variables
        model._trained = True
        return model
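
For reference, a hedged usage sketch of this class-level load (Classifier and the path are illustrative):

from finetune import Classifier

model = Classifier.load("./my_saved_model", batch_size=4)  # kwargs override saved config
predictions = model.predict(["some example text"])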
Example #3
    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        download_data_if_required()
        self.encoder = TextEncoder()

        # symbolic ops
        self.logits = None  # classification logits
        self.target_loss = None  # cross-entropy loss
        self.lm_losses = None  # language modeling losses
        self.lm_predict_op = None
        self.train = None  # gradient + parameter update
        self.features = None  # hidden representation fed to classifier
        self.summaries = None  # Tensorboard summaries
        self.train_writer = None
        self.valid_writer = None
        self.predict_params = None
        self.train_op = None
        self.predict_op = None
        self.predict_proba_op = None
        self.sess = None
        self.noop = tf.no_op()

        # indicator vars
        self.is_built = False  # has tf graph been constructed?
        self.is_trained = False  # has model been fine-tuned?
        self.require_lm = False

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = self.encoder.vocab_size
            word_embeddings = value[:vocab_size -
                                    len(self.encoder.special_tokens)]
            special_embed = value[len(word_embeddings):vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(
                    positional_embed):
                positional_embed = interpolate_pos_embed(
                    positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError(
                    "Max Length cannot be greater than {} if interploate_pos_embed is turned off"
                    .format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate(
                (word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        self.saver = Saver(
            fallback_filename=JL_BASE,
            exclude_matches=None if self.config.save_adam_vars else "adam",
            variable_transforms=[process_embeddings])
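
The variable_transforms argument implies a simple contract: each transform receives a variable's name and value and returns a (possibly modified) value. A minimal sketch of applying such transforms, assuming that contract (apply_transforms is illustrative, not library code):

import functools

def apply_transforms(variables, transforms):
    # variables: {name: np.ndarray}; each transform is f(name, value) -> value
    return {
        name: functools.reduce(lambda v, t: t(name, v), transforms, value)
        for name, value in variables.items()
    }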
Example #4
    def load(cls, path):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        """
        saver = Saver(JL_BASE)
        model = saver.load(path)
        model._initialize()
        model.saver.variables = saver.variables
        return model
Example #5
    def load(cls, path):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        """
        saver = Saver(JL_BASE)
        model = saver.load(path)
        model._initialize()
        model.saver.variables = saver.variables
        tf.reset_default_graph()
        return model
Example #6
    def load(cls, path, **kwargs):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        :param **kwargs: key-value pairs of config items to override.
        """
        assert_valid_config(**kwargs)
        download_data_if_required()
        saver = Saver()
        model = saver.load(path)
        model.config.update(kwargs)
        saver.set_fallback(model.config.base_model_path)
        model._initialize()
        model.saver.variables = saver.variables
        return model
Example #7
    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)
        self.estimator_ = None
        if self.config.tensorboard_folder is not None:
            self.estimator_dir = os.path.abspath(
                os.path.join(self.config.tensorboard_folder,
                             str(int(time.time()))))
            pathlib.Path(self.estimator_dir).mkdir(parents=True, exist_ok=True)
            self.cleanup_glob = None
        else:
            self.estimator_dir = tempfile.mkdtemp(prefix="Finetune")
            self.cleanup_glob = self.estimator_dir

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = ENCODER.vocab_size
            word_embeddings = value[:vocab_size - len(ENCODER.special_tokens)]
            special_embed = value[len(word_embeddings):vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(
                    positional_embed):
                positional_embed = interpolate_pos_embed(
                    positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError(
                    "Max Length cannot be greater than {} if interploate_pos_embed is turned off"
                    .format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate(
                (word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        base_model_path = os.path.join(os.path.dirname(__file__), "model",
                                       self.config.base_model_path)
        self.saver = Saver(
            fallback_filename=base_model_path,
            exclude_matches=None if self.config.save_adam_vars else "Adam",
            variable_transforms=[process_embeddings],
            save_dtype=self.config.save_dtype)
Example #8
    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        download_data_if_required()
        self.encoder = TextEncoder()

        # symbolic ops
        self.logits = None  # classification logits
        self.target_loss = None  # cross-entropy loss
        self.lm_losses = None  # language modeling losses
        self.lm_predict_op = None
        self.train = None  # gradient + parameter update
        self.features = None  # hidden representation fed to classifier
        self.summaries = None  # Tensorboard summaries
        self.train_writer = None
        self.valid_writer = None
        self.predict_params = None
        self.train_op = None
        self.predict_op = None
        self.predict_proba_op = None
        self.sess = None
        self.noop = tf.no_op()

        # indicator vars
        self.is_built = False  # has tf graph been constructed?
        self.is_trained = False  # has model been fine-tuned?
        self.require_lm = False

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = self.encoder.vocab_size
            word_embeddings = value[:vocab_size - len(self.encoder.special_tokens)]
            special_embed = value[len(word_embeddings): vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(positional_embed):
                positional_embed = interpolate_pos_embed(positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError("Max Length cannot be greater than {} if interploate_pos_embed is turned off".format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate((word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        self.saver = Saver(
            fallback_filename=JL_BASE,
            exclude_matches=None if self.config.save_adam_vars else "adam",
            variable_transforms=[process_embeddings]
        )
Example #9
    def load(path, *args, **kwargs):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        :param **kwargs: key-value pairs of config items to override.
        """
        if not isinstance(path, str):
            instance = path
            raise FinetuneError(
                "The .load() method can only be called on the class, not on an instance. Try `{}.load(\"{}\") instead.".format(
                    instance.__class__.__name__, args[0]
                )
            )

        assert_valid_config(**kwargs)
        saver = Saver()
        model = saver.load(path)
        model.config.update(kwargs)
        download_data_if_required(model.config.base_model)
        saver.set_fallback(model.config.base_model_path)
        model._initialize()
        model.saver.variables = saver.variables
        return model
Example #10
class BaseModel(object, metaclass=ABCMeta):
    """
    A sklearn-style, task-agnostic base class for finetuning a Transformer language model.
    """
    def __init__(self, config=None, **kwargs):
        """ 
        For a full list of configuration options, see `finetune.config`.
        
        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """

        weak_self = weakref.ref(self)

        def cleanup():
            strong_self = weak_self()
            if strong_self is not None:
                BaseModel.__del__(strong_self)

        atexit.register(cleanup)

        self.config = config or get_default_config()
        self.config.update(kwargs)

        if self.config.num_layers_trained != self.config.n_layer and self.config.train_embeddings:
            raise ValueError(
                "If you are only finetuning a subset of the layers, you cannot finetune embeddings."
            )

        self.input_pipeline = self._get_input_pipeline()
        download_data_if_required()
        self._initialize()

    @abstractmethod
    def _get_input_pipeline(self):
        pass

    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)
        self.estimator_ = None
        if self.config.tensorboard_folder is not None:
            self.estimator_dir = os.path.abspath(
                os.path.join(self.config.tensorboard_folder,
                             str(int(time.time()))))
            pathlib.Path(self.estimator_dir).mkdir(parents=True, exist_ok=True)
            self.cleanup_glob = None
        else:
            self.estimator_dir = tempfile.mkdtemp(prefix="Finetune")
            self.cleanup_glob = self.estimator_dir

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = ENCODER.vocab_size
            word_embeddings = value[:vocab_size - len(ENCODER.special_tokens)]
            special_embed = value[len(word_embeddings):vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(
                    positional_embed):
                positional_embed = interpolate_pos_embed(
                    positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError(
                    "Max Length cannot be greater than {} if interploate_pos_embed is turned off"
                    .format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate(
                (word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        base_model_path = os.path.join(os.path.dirname(__file__), "model",
                                       self.config.base_model_path)
        self.saver = Saver(
            fallback_filename=base_model_path,
            exclude_matches=None if self.config.save_adam_vars else "Adam",
            variable_transforms=[process_embeddings],
            save_dtype=self.config.save_dtype)

    @abstractmethod
    def _predict_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _predict_proba_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _target_model(self,
                      *,
                      featurizer_state,
                      targets,
                      n_outputs,
                      train=False,
                      reuse=None,
                      **kwargs):
        # Overridden by subclass to attach a target model onto the shared base featurizer.
        raise NotImplementedError

    def _n_steps(self, n_examples, batch_size, n_gpus):
        steps = int(math.ceil(n_examples / (batch_size * n_gpus)))
        return steps
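
    # Worked example of _n_steps: 1,000 examples, batch_size=8, n_gpus=2
    # -> ceil(1000 / (8 * 2)) = 63 steps per epoch.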

    def finetune(self, Xs, Y=None, batch_size=None):
        if not callable(Xs) and Y is not None and len(Xs) != len(Y):
            raise FinetuneError(
                "Mismatch between number of examples ({}) and number of targets ({}) provided."
                .format(len(Xs), len(Y)))
        batch_size = batch_size or self.config.batch_size

        val_input_fn, train_input_fn, val_size, val_interval = self.input_pipeline.get_train_input_fns(
            Xs, Y, batch_size=batch_size)
        if val_size <= 10 and self.config.keep_best_model:
            tf.logging.warning(
                "Early stopping / keeping best model with a validation size of {} is likely to case undesired results"
                .format(val_size))

        steps_per_epoch = self._n_steps(n_examples=self.config.dataset_size,
                                        batch_size=batch_size,
                                        n_gpus=max(
                                            1, len(self.config.visible_gpus)))
        num_steps = steps_per_epoch * self.config.n_epochs
        estimator = self.get_estimator()
        train_hooks = [
            self.saver.get_saver_hook(
                estimator=estimator,
                keep_best_model=self.config.keep_best_model,
                steps_per_epoch=steps_per_epoch,
                early_stopping_steps=self.config.early_stopping_steps,
                eval_frequency=val_interval)
        ]
        if val_size > 0:
            train_hooks.append(
                tf.contrib.estimator.InMemoryEvaluatorHook(
                    estimator,
                    val_input_fn,
                    every_n_iter=val_interval,
                    steps=val_size // batch_size))

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            estimator.train(train_input_fn, hooks=train_hooks, steps=num_steps)

    def get_estimator(self, force_build_lm=False):
        conf = tf.ConfigProto(
            allow_soft_placement=self.config.soft_device_placement,
            log_device_placement=self.config.log_device_placement,
        )
        num_gpus = len(self.config.visible_gpus)
        if num_gpus > 1:
            distribute_strategy = PatchedParameterServerStrategy(
                num_gpus_per_worker=num_gpus)
        else:
            distribute_strategy = None

        config = tf.estimator.RunConfig(
            tf_random_seed=self.config.seed,
            save_summary_steps=self.config.val_interval,
            save_checkpoints_secs=None,
            save_checkpoints_steps=None,
            # disable auto summaries
            session_config=conf,
            log_step_count_steps=100,
            train_distribute=distribute_strategy,
            keep_checkpoint_max=1)

        model_fn = get_model_fn(
            target_model_fn=self._target_model,
            predict_op=self._predict_op,
            predict_proba_op=self._predict_proba_op,
            build_target_model=self.input_pipeline.target_dim is not None,
            build_lm=force_build_lm or self.config.lm_loss_coef > 0.0
            or self.input_pipeline.target_dim is None,
            encoder=ENCODER,
            target_dim=self.input_pipeline.target_dim,
            label_encoder=self.input_pipeline.label_encoder,
            saver=self.saver)
        return tf.estimator.Estimator(model_dir=self.estimator_dir,
                                      model_fn=model_fn,
                                      config=config,
                                      params=self.config)

    def _inference(self, Xs, mode=None):
        estimator = self.get_estimator()
        input_func = self.input_pipeline.get_predict_input_fn(Xs)
        length = len(Xs) if not callable(Xs) else None

        pred_gen = list(
            map(
                lambda y: y[mode] if mode else y,
                tqdm.tqdm(estimator.predict(input_fn=input_func,
                                            predict_keys=mode),
                          total=length,
                          desc="Inference")))
        return pred_gen

    def fit(self, *args, **kwargs):
        """ An alias for finetune. """
        return self.finetune(*args, **kwargs)

    def _predict(self, Xs):
        raw_preds = self._inference(Xs, PredictMode.NORMAL)
        return self.input_pipeline.label_encoder.inverse_transform(
            np.asarray(raw_preds))

    def predict(self, Xs):
        return self._predict(Xs)

    def _predict_proba(self, Xs):
        """
        Produce raw numeric outputs for proba predictions
        """
        raw_preds = self._inference(Xs, PredictMode.PROBAS)
        return raw_preds

    def predict_proba(self, *args, **kwargs):
        """
        The base method for predicting from the model.
        """
        raw_probas = self._predict_proba(*args, **kwargs)
        classes = self.input_pipeline.label_encoder.classes_

        formatted_predictions = []
        for probas in raw_probas:
            formatted_predictions.append(dict(zip(classes, probas)))
        return formatted_predictions
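
    # Hypothetical usage: each prediction pairs class labels with probabilities,
    # e.g. model.predict_proba(["some text"]) -> [{"negative": 0.1, "positive": 0.9}]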

    def _featurize(self, Xs):
        raw_preds = self._inference(Xs, PredictMode.FEATURIZE)
        return np.asarray(raw_preds)

    @abstractmethod
    def featurize(self, *args, **kwargs):
        """
        Base method to get raw features out of the model.
        These features are the same that are fed into the target_model.
        """
        return self._featurize(*args, **kwargs)

    @classmethod
    def get_eval_fn(cls):
        raise NotImplementedError(
            "No default eval function is given, please pass an explicit eval fn to grid_search"
        )

    def transform(self, *args, **kwargs):
        """
        An alias for `featurize`.
        """
        return self.featurize(*args, **kwargs)

    def _set_random_seed(self, seed=None):
        seed = seed or self.config.seed
        random.seed(seed)
        np.random.seed(seed)
        tf.set_random_seed(seed)

    def generate_text(self,
                      seed_text='',
                      max_length=None,
                      use_extra_toks=True):
        """
        Performs a prediction on the language modeling objective given some seed text, using noisy greedy decoding.
        The temperature parameter for decoding is set in the config.

        :param seed_text: Defaults to the empty string. This forms the starting point for generation.
        :param max_length: The maximum length to decode to.
        :return: A string containing the generated text.
        """
        def dataset_encoded():
            while not dataset_encoded.finished:
                yield {
                    "tokens": arr_encoded.token_ids,
                    "mask": arr_encoded.mask
                }

        dataset_encoded.finished = False

        def get_input_fn():
            types, shapes = self.input_pipeline.feed_shape_type_def()
            tf_dataset = Dataset.from_generator(dataset_encoded, types[0],
                                                shapes[0])
            return tf_dataset.batch(1)

        self.config.use_extra_toks = use_extra_toks
        encoded = ENCODER._encode([seed_text])
        if encoded == [] and not use_extra_toks:
            raise ValueError(
                "If you are not using the extra tokens, you must provide some non-empty seed text"
            )
        start = [ENCODER.start] if use_extra_toks else []
        encoded = EncodedOutput(token_ids=start + encoded.token_ids[0])

        estimator = self.get_estimator(force_build_lm=True)
        predict = estimator.predict(input_fn=get_input_fn)

        EOS = ENCODER.clf_token
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            for i in range(len(encoded.token_ids),
                           (max_length or self.config.max_length) - 2):
                arr_encoded = self.input_pipeline._array_format(encoded)
                class_idx = next(predict)[PredictMode.GENERATE_TEXT]
                encoded.token_ids.append(class_idx[i])
                if encoded.token_ids[-1] == EOS:
                    break
            dataset_encoded.finished = True

        del self.config["use_extra_toks"]

        return ENCODER.decode(encoded.token_ids)
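
    # Hypothetical usage (seed text and length are illustrative):
    #   text = model.generate_text(seed_text="The weather today", max_length=64)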

    def __getstate__(self):
        """
        Leave serialization of all tf objects to tf
        """
        required_fields = ['_load_from_file', 'config', 'input_pipeline']
        serialized_state = {
            k: v
            for k, v in self.__dict__.items() if k in required_fields
        }
        return serialized_state

    def save(self, path):
        """
        Saves the state of the model to disk to the folder specified by `path`. If `path` does not exist, it will be auto-created.

        Save is performed in two steps:
            - Serialize tf graph to disk using tf.Saver
            - Serialize python model using pickle

        Note:
            Does not serialize state of Adam optimizer.
            Should not be used to save / restore a training model.
        """
        if path is None:
            return

        path = os.path.abspath(path)
        self.saver.save(self, path)

    @classmethod
    def load(cls, path):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        """
        saver = Saver(JL_BASE)
        model = saver.load(path)
        model._initialize()
        model.saver.variables = saver.variables
        return model

    @classmethod
    def finetune_grid_search(cls,
                             Xs,
                             Y,
                             *,
                             test_size,
                             config=None,
                             eval_fn=None,
                             probs=False,
                             return_all=False):
        """
        Performs grid search over config items defined using "GridSearchable" objects and returns either full results or
        the config object that relates to the best results. The default config contains grid searchable objects for the
        most important parameters to search over.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
         given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 inputs (prediction, truth) and returns a float, with a max value being desired.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
        """
        if isinstance(Xs[0], str):
            Xs = [Xs]
        config = config or get_default_config()
        config.val_size = 0.0
        eval_fn = eval_fn or cls.get_eval_fn()

        trainXs, testXs, trainY, testY = train_test_split(list_transpose(Xs),
                                                          Y,
                                                          test_size=test_size,
                                                          shuffle=True)
        trainXs = list_transpose(trainXs)
        testXs = list_transpose(testXs)
        gs = config.get_grid_searchable()
        ranged_keys = gs.keys()
        ranged_iterators = gs.values()
        grid_gen = itertools.product(*ranged_iterators)
        results = []
        for grid_item in grid_gen:
            config_ = deepcopy(config)
            config_.update(dict(zip(ranged_keys, grid_item)))
            instance = cls(config=config_)
            instance.finetune(*trainXs, Y=trainY)
            if probs:
                res = instance.predict_proba(*testXs)
            else:
                res = instance.predict(*testXs)
            results.append((config_, eval_fn(res, testY)))
            del instance

        if return_all:
            return results
        return max(results, key=lambda x: x[1])[0]

    @classmethod
    def finetune_grid_search_cv(cls,
                                Xs,
                                Y,
                                *,
                                n_splits,
                                test_size,
                                config=None,
                                eval_fn=None,
                                probs=False,
                                return_all=False):
        """
        Performs cross validated grid search over config items defined using "GridSearchable" objects and returns either full results or
        the config object that relates to the best results. The default config contains grid searchable objects for the
        most important parameters to search over.

        Note that the CV splits are not guaranteed to be unique, but each split is evaluated against every set of hparams.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param n_splits: Number of CV splits to do.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
            given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 batches of outputs and returns a float, with a max value being
            desired. An arithmetic mean must make sense for this metric.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
        """
        results = []
        for _ in range(n_splits):
            res = cls.finetune_grid_search(Xs,
                                           Y,
                                           test_size=test_size,
                                           probs=probs,
                                           eval_fn=eval_fn,
                                           config=config,
                                           return_all=True)
            results.append(res)
        results = list(zip(*results))
        aggregated_results = []
        for configuration in results:
            config_common = None
            sum_res = 0
            n_res = 0
            for config, result in configuration:
                config_common = config_common or config
                assert config == config_common
                n_res += 1
                sum_res += result
            aggregated_results.append((config_common, sum_res / n_res))

        if return_all:
            return aggregated_results

        return max(aggregated_results, key=lambda x: x[1])[0]

    def __del__(self):
        if hasattr(self, 'cleanup_glob') and self.cleanup_glob is not None:
            for file_or_folder in glob.glob(self.cleanup_glob):
                try:
                    shutil.rmtree(file_or_folder)
                except NotADirectoryError:
                    os.remove(file_or_folder)
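
A hedged usage sketch of the grid search above (Classifier and the metric are illustrative; any model subclass plus a "higher is better" eval fn should fit):

from finetune import Classifier
from sklearn.metrics import accuracy_score

texts, labels = ["good", "great", "bad", "awful"], ["pos", "pos", "neg", "neg"]  # toy data
best_config = Classifier.finetune_grid_search(
    texts, labels,
    test_size=0.25,
    eval_fn=lambda preds, truth: accuracy_score(truth, preds))
model = Classifier(config=best_config)
model.fit(texts, labels)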
Example #11
class BaseModel(object, metaclass=ABCMeta):
    """
    A sklearn-style, task-agnostic base class for finetuning a Transformer language model.
    """
    defaults = dict()

    def __init__(self, **kwargs):
        """
        For a full list of configuration options, see `finetune.config`.

        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """
        weak_self = weakref.ref(self)

        def cleanup():
            strong_self = weak_self()
            if strong_self is not None:
                BaseModel.__del__(strong_self)

        atexit.register(cleanup)
        d = deepcopy(self.defaults)
        d.update(kwargs)
        self.config = get_config(**d)
        self.resolved_gpus = None
        self.validate_config()
        download_data_if_required(self.config.base_model)
        self.input_pipeline = self._get_input_pipeline()
        self._trained = False
        self._initialize()
        if self.config.debugging_logs:
            os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
            tf_logging.set_verbosity(tf_logging.DEBUG)

    def validate_config(self):
        if (self.config.num_layers_trained != self.config.n_layer
                and self.config.train_embeddings):
            raise ValueError(
                "If you are only finetuning a subset of the layers, you cannot finetune embeddings."
            )

    @abstractmethod
    def _get_input_pipeline(self):
        pass

    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        # state for prediction caching
        self._predictions = None
        self._cached_predict = False
        self._closed = False
        self._to_pull = 0

        try:
            self.estimator_dir = os.path.abspath(
                os.path.join(self.config.tensorboard_folder,
                             str(int(time.time()))))
            pathlib.Path(self.estimator_dir).mkdir(parents=True, exist_ok=True)
            self._tmp_dir = None
        except (TypeError, IOError):
            # TypeError --> tensorboard_folder is None
            # IOError --> user likely does not have permission to write to the tensorboard_folder directory
            # Both cases can be resolved by falling back to a temporary directory
            self._tmp_dir = tempfile.TemporaryDirectory(prefix="Finetune")
            self.estimator_dir = self._tmp_dir.name
            LOGGER.info("Saving tensorboard output to {}".format(
                self.estimator_dir))

        self.saver = Saver(
            fallback_filename=self.config.base_model_path,
            exclude_matches=None if self.config.save_adam_vars else "Adam",
            variable_transforms=[
                embedding_preprocessor(self.input_pipeline, self.config)
            ],
            save_dtype=self.config.save_dtype,
            target_model_init_from_base_model=self.config.target_model_init_from_base_model)

    @abstractmethod
    def _predict_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _predict_proba_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _target_model(self,
                      *,
                      config,
                      featurizer_state,
                      targets,
                      n_outputs,
                      train=False,
                      reuse=None,
                      **kwargs):
        # Overridden by subclass to attach a target model onto the shared base featurizer.
        raise NotImplementedError

    def _pre_target_model_hook(self, featurizer_state):
        add_context_embed(featurizer_state)

    def _n_steps(self, n_examples, batch_size, n_gpus):
        steps = int(math.ceil(n_examples / (batch_size * n_gpus)))
        return steps

    def finetune(self,
                 Xs,
                 Y=None,
                 batch_size=None,
                 context=None,
                 update_hook=None):
        if (not callable(Xs) and Y is not None and len(Xs) != len(Y)):
            raise FinetuneError(
                "Mismatch between number of examples ({}) and number of targets ({}) provided."
                .format(len(Xs), len(Y)))

        batch_size = batch_size or self.config.batch_size
        val_input_fn, train_input_fn, val_size, val_interval = self.input_pipeline.get_train_input_fns(
            Xs,
            Y,
            batch_size=batch_size,
            context=context,
            update_hook=update_hook)

        if self.config.keep_best_model:
            if isinstance(val_size, dict):
                tf.logging.warning(
                    "Cannot early stop or keep best model with MTL")
            elif val_size <= 10:
                tf.logging.warning(
                    "Early stopping / keeping best model with a validation size of {} is likely to case undesired results"
                    .format(val_size))

        force_build_lm = Y is None
        estimator, hooks = self.get_estimator(force_build_lm=force_build_lm)
        train_hooks = hooks.copy()

        steps_per_epoch = self._n_steps(
            n_examples=self.input_pipeline.dataset_size,
            batch_size=batch_size,
            n_gpus=max(1, len(self.resolved_gpus)),
        )
        num_steps = steps_per_epoch * self.config.n_epochs

        if self.config.tasks is not None:
            # Validation with MTL tasks
            for task in self.config.tasks:
                if val_size[task] > 0:
                    train_hooks.append(
                        tf.estimator.experimental.InMemoryEvaluatorHook(
                            estimator,
                            val_input_fn[task],
                            every_n_iter=val_interval[task],
                            steps=val_size[task] // batch_size,
                            name=task,
                        ))
                    train_hooks.append(
                        tf.estimator.experimental.InMemoryEvaluatorHook(
                            estimator,
                            val_input_fn[task + "_train"],
                            every_n_iter=val_interval[task],
                            steps=val_size[task] // batch_size,
                            name=task + "_train",
                        ))
            early_stopping_interval = sys.maxsize  # turn off early stopping for mtl.

        elif val_size > 0:
            # Validation with all other tasks.
            train_hooks.append(
                tf.estimator.experimental.InMemoryEvaluatorHook(
                    estimator,
                    val_input_fn,
                    every_n_iter=val_interval,
                    steps=math.ceil(val_size / batch_size),
                ))
            early_stopping_interval = val_interval
        else:
            early_stopping_interval = sys.maxsize

        train_hooks.append(
            self.saver.get_saver_hook(
                estimator=estimator,
                keep_best_model=self.config.keep_best_model,
                steps_per_epoch=steps_per_epoch,
                early_stopping_steps=self.config.early_stopping_steps,
                eval_frequency=early_stopping_interval,
                cache_weights_to_file=self.config.cache_weights_to_file))

        if self.config.in_memory_finetune is not None:
            train_hooks.extend(make_in_memory_finetune_hooks(self, estimator))

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if self.config.prefit_init:
                tf.logging.info("Starting pre-fit initialisation...")
                num_layers_trained = self.config.num_layers_trained
                self.config.num_layers_trained = 0
                estimator.train(train_input_fn,
                                hooks=train_hooks,
                                steps=num_steps)
                self.config.num_layers_trained = num_layers_trained
                self.saver.variables = {
                    k: v
                    for k, v in self.saver.variables.items()
                    if "adam" not in k and "global_step" not in k
                }
                for weight in self.saver.variables:
                    if (weight.startswith("model/target/")):
                        w = self.saver.variables[weight]
                        if len(w.shape) == 1:
                            continue
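                        # Renormalize each output column of the target weights to the
                        # expected L2 norm of a random init: sqrt(weight_stddev^2 * fan_in).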
                        w_flat = np.reshape(w, [-1, w.shape[-1]])
                        expectation_of_norm = ((self.config.weight_stddev**2) *
                                               w_flat.shape[0])**0.5
                        self.saver.variables[weight] = np.reshape(
                            expectation_of_norm * w_flat /
                            np.linalg.norm(w_flat, axis=0),
                            w.shape,
                        )

                tf.logging.info("Finishing pre-fit initialisation...")
            estimator.train(train_input_fn, hooks=train_hooks, steps=num_steps)

        self._trained = True

    def _distribute_strategy(self, visible_gpus):
        """
        Select a distribution strategy based on available devices.

        Side effect: sets self.resolved_gpus for future use in computing steps per epoch
        """

        if isinstance(visible_gpus, (list, tuple)):
            resolved_gpus = all_gpus(visible_gpus=tuple(visible_gpus))
        else:
            resolved_gpus = all_gpus()

        resolved_gpus_string = ['/gpu:{}'.format(gpu) for gpu in resolved_gpus]
        if len(resolved_gpus_string) == 1:
            distribute_strategy = tf.contrib.distribute.OneDeviceStrategy(
                resolved_gpus_string[0])
        else:
            if self.config.per_process_gpu_memory_fraction is not None:
                warnings.warn(
                    "Setting `per_process_gpu_memory_fraction` is currently unsupported in multi-gpu environments."
                )

            if isinstance(self.config.distribution_strategy, str):
                if self.config.distribution_strategy.lower() == "mirrored":
                    distribute_strategy = tf.distribute.MirroredStrategy()
                elif self.config.distribution_strategy.lower() == "central_storage":
                    distribute_strategy = tf.distribute.experimental.CentralStorageStrategy(
                        resolved_gpus_string or None)
                else:
                    raise FinetuneError(
                        "Distribution strategy {} is not supported, please try \"mirrored\" or \"central_storage\" or an instance of tf.distribute.Strategy"
                        .format(self.config.distribution_strategy))
            elif isinstance(self.config.distribution_strategy,
                            tf.distribute.Strategy):
                distribute_strategy = self.config.distribution_strategy

        self.resolved_gpus = resolved_gpus
        return distribute_strategy

    def _get_estimator_config(self):
        conf = tf.ConfigProto(
            allow_soft_placement=self.config.soft_device_placement,
            log_device_placement=self.config.log_device_placement,
        )
        if self.config.per_process_gpu_memory_fraction is not None:
            conf.gpu_options.per_process_gpu_memory_fraction = (
                self.config.per_process_gpu_memory_fraction)
        optimizer_options = conf.graph_options.optimizer_options
        if self.config.xla:
            optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

        distribute_strategy = self._distribute_strategy(
            self.config.visible_gpus)
        config = tf.estimator.RunConfig(
            tf_random_seed=self.config.seed,
            save_summary_steps=self.config.val_interval,
            save_checkpoints_secs=None,
            save_checkpoints_steps=None,
            # disable auto summaries
            session_config=conf,
            log_step_count_steps=100,
            train_distribute=distribute_strategy,
            keep_checkpoint_max=1,
        )
        return config

    def get_estimator(self, force_build_lm=False, build_explain=False):
        build_lm = force_build_lm or self.config.lm_loss_coef > 0.0
        config = self._get_estimator_config()
        model_fn = get_model_fn(
            target_model_fn=self._target_model,
            pre_target_model_hook=self._pre_target_model_hook,
            predict_op=self._predict_op,
            predict_proba_op=self._predict_proba_op,
            build_target_model=self.input_pipeline.target_dim is not None,
            lm_type=self.config.lm_type if build_lm else None,
            encoder=self.input_pipeline.text_encoder,
            target_dim=self.input_pipeline.target_dim,
            label_encoder=self.input_pipeline.label_encoder,
            build_explain=build_explain,
            n_replicas=max(1, len(self.resolved_gpus)))

        hooks = [InitializeHook(self.saver)]
        est = tf.estimator.Estimator(
            model_dir=self.estimator_dir,
            model_fn=model_fn,
            config=config,
            params=self.config,
        )

        return est, hooks

    def close(self):
        self._closed = True

        if self._predictions is not None:

            # force input fn termination
            try:
                for _ in self._predictions:
                    pass
            except AttributeError:
                pass

            self._predictions = None

    def _clear_prediction_queue(self):
        # Flush examples used to pad the last batch
        # of previous call to predict()
        for i in range(self._to_pull):
            next(self._predictions)

        # Reset counter
        self._to_pull = 0

    def _data_generator(self):
        self._cached_example = None
        self._to_pull = 0
        while not self._closed:
            try:
                example = self._data.pop(0)

                # Ensure examples used for padding match expected input format
                if isinstance(example, str):
                    self._cached_example = ""
                elif isinstance(example, (list, tuple)):
                    self._cached_example = [""] * len(example)

                yield example
            except IndexError:
                # _data_generator was asked for more examples than we had
                # Feed a cached example through the input_pipeline
                # to fill out the batch, but remember to clear it
                # out of the queue later
                self._to_pull += 1
                yield self._cached_example

    @contextmanager
    def cached_predict(self):
        """
        Context manager that prevents the recreation of the tensorflow graph on every call to BaseModel.predict().
        """
        self._cached_predict = True
        yield self
        self._cached_predict = False
        self.close()
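
    # Hypothetical usage: reuses the estimator's prediction generator across calls
    # instead of rebuilding the tensorflow graph on every predict():
    #
    #   with model.cached_predict():
    #       for batch in batches:
    #           model.predict(batch)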

    def _cached_inference(self,
                          Xs,
                          predict_keys=None,
                          n_examples=None,
                          update_hook=None):
        """
        Ensure graph is not rebuilt on subsequent calls to .predict()
        """
        self._data = Xs
        self._closed = False
        n = n_examples or len(self._data)
        if self._predictions is None:
            input_fn = self.input_pipeline.get_predict_input_fn(
                self._data_generator)
            _estimator, hooks = self.get_estimator()
            self._predictions = _estimator.predict(input_fn=input_fn,
                                                   predict_keys=predict_keys,
                                                   hooks=hooks)

        self._clear_prediction_queue()

        predictions = [None] * n

        for i in ProgressBar(range(n),
                             total=n,
                             desc="Inference",
                             update_hook=update_hook):
            y = next(self._predictions)
            try:
                y = y[predict_keys[0]] if len(predict_keys) == 1 else y
            except ValueError:
                raise FinetuneError(
                    "Cannot call `predict()` on a model that has not been fit."
                )
            predictions[i] = y

        return predictions

    def _inference(self,
                   Xs,
                   predict_keys=None,
                   n_examples=None,
                   context=None,
                   update_hook=None):
        Xs = self.input_pipeline._format_for_inference(Xs)

        if self._cached_predict:
            return self._cached_inference(Xs=Xs,
                                          predict_keys=predict_keys,
                                          n_examples=n_examples,
                                          update_hook=update_hook)
        else:
            input_fn = self.input_pipeline.get_predict_input_fn(
                Xs, context=context)
            estimator, hooks = self.get_estimator(
                build_explain=PredictMode.EXPLAIN in predict_keys)
            length = len(Xs) if not callable(Xs) else None

            prediction_iterator = estimator.predict(input_fn=input_fn,
                                                    predict_keys=predict_keys,
                                                    hooks=hooks)
            predictions = ProgressBar(prediction_iterator,
                                      total=n_examples or length,
                                      desc="Inference",
                                      update_hook=update_hook)
            try:
                return [
                    pred[predict_keys[0]] if len(predict_keys) == 1 else pred
                    for pred in predictions
                ]
            except ValueError:
                raise FinetuneError(
                    "Cannot call `predict()` on a model that has not been fit."
                )

    def fit(self, *args, **kwargs):
        """ An alias for finetune. """
        return self.finetune(*args, **kwargs)

    def _predict(self, Xs, context=None, **kwargs):
        raw_preds = self._inference(Xs,
                                    predict_keys=[PredictMode.NORMAL],
                                    context=context,
                                    **kwargs)
        return self.input_pipeline.label_encoder.inverse_transform(
            np.asarray(raw_preds))

    def predict(self, Xs, context=None, **kwargs):
        return self._predict(Xs, context=context, **kwargs)

    def _predict_proba(self, Xs, context=None, **kwargs):
        """
        Produce raw numeric outputs for proba predictions
        """
        raw_preds = self._inference(Xs,
                                    predict_keys=[PredictMode.PROBAS],
                                    context=context,
                                    **kwargs)
        return raw_preds

    def predict_proba(self, *args, **kwargs):
        """
        The base method for predicting from the model.
        """
        raw_probas = self._predict_proba(*args, **kwargs)
        classes = self.input_pipeline.label_encoder.classes_

        formatted_predictions = []
        for probas in raw_probas:
            formatted_predictions.append(dict(zip(classes, probas)))
        return formatted_predictions

    def attention_weights(self, Xs):
        if self.config.base_model in [GPTModel, GPTModelSmall]:
            raw_preds = self._inference(Xs,
                                        predict_keys=[PredictMode.ATTENTION])
            return raw_preds
        raise NotImplementedError(
            "'attention_weights' only supported for GPTModel and GPTModelSmall base models."
        )

    def _featurize(self, Xs, **kwargs):
        raw_preds = self._inference(Xs,
                                    predict_keys=[PredictMode.FEATURIZE],
                                    **kwargs)
        return np.asarray(raw_preds)

    def _featurize_sequence(self, Xs, **kwargs):
        raw_preds = self._inference(Xs,
                                    predict_keys=[PredictMode.SEQUENCE],
                                    **kwargs)
        return np.asarray(raw_preds)

    def featurize(self, *args, **kwargs):
        """
        Base method to get raw features out of the model.
        These features are the same features that are fed into the target_model.
        """
        return self._featurize(*args, **kwargs)

    def featurize_sequence(self, *args, **kwargs):
        """
        Base method to get raw token-level features out of the model.
        These features are the same features that are fed into the target_model.
        """
        return self._featurize_sequence(*args, **kwargs)

    @classmethod
    def get_eval_fn(cls):
        raise NotImplementedError(
            "No default eval function is given, please pass an explicit eval fn to grid_search"
        )

    def transform(self, *args, **kwargs):
        """
        An alias for `featurize`.
        """
        return self.featurize(*args, **kwargs)

    def _set_random_seed(self, seed=None):
        seed = seed or self.config.seed
        random.seed(seed)
        np.random.seed(seed)
        tf.set_random_seed(seed)

    def generate_text(self,
                      seed_text="",
                      max_length=None,
                      use_extra_toks=None):
        """
        Performs a prediction on the language modeling objective given some seed text, using noisy greedy decoding.
        The temperature parameter for decoding is set in the config.

        :param seed_text: Defaults to the empty string. This forms the starting point for generation.
        :param max_length: The maximum length to decode to.
        :return: A string containing the generated text.
        """
        if use_extra_toks is None:
            use_extra_toks = self._trained

        def dataset_encoded():
            while not dataset_encoded.finished:
                yield {
                    "tokens": arr_encoded.token_ids,
                    "mask": arr_encoded.mask
                }

        dataset_encoded.finished = False

        def get_input_fn():
            types, shapes = self.input_pipeline.feed_shape_type_def()
            tf_dataset = Dataset.from_generator(dataset_encoded, types[0],
                                                shapes[0])
            return tf_dataset.batch(1)

        self.config.use_extra_toks = use_extra_toks
        encoded = self.input_pipeline.text_encoder._encode([seed_text])
        if encoded.token_ids == [] and not use_extra_toks:
            raise ValueError(
                "If you are not using the extra tokens, you must provide some non-empty seed text"
            )
        start = ([self.input_pipeline.text_encoder.start_token]
                 if use_extra_toks else [])
        token_ids = start
        if encoded.token_ids is not None and len(encoded.token_ids):
            token_ids += encoded.token_ids[0]
        encoded = EncodedOutput(token_ids=token_ids)

        estimator, hooks = self.get_estimator(force_build_lm=True)
        predict = estimator.predict(input_fn=get_input_fn,
                                    predict_keys=[PredictMode.GENERATE_TEXT],
                                    hooks=hooks)

        EOS = self.input_pipeline.text_encoder.end_token
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            for i in range(
                    len(encoded.token_ids) - 1,
                (max_length or self.config.max_length) - 2):
                arr_encoded = self.input_pipeline._array_format(encoded)
                class_idx = next(predict)[PredictMode.GENERATE_TEXT]
                encoded.token_ids.append(class_idx[-1])
                if encoded.token_ids[-1] == EOS:
                    break
            dataset_encoded.finished = True

        del self.config["use_extra_toks"]

        return self.input_pipeline.text_encoder.decode(encoded.token_ids)

    def __getstate__(self):
        """
        Leave serialization of all tf objects to tf
        """
        required_fields = [
            "_load_from_file", "config", "input_pipeline", "_trained"
        ]
        serialized_state = {
            k: v
            for k, v in self.__dict__.items() if k in required_fields
        }
        return serialized_state

    def save(self, path):
        """
        Saves the state of the model to disk to the folder specified by `path`.  If `path` does not exist, it will be auto-created.

        Save is performed in two steps:
            - Serialize tf graph to disk using tf.Saver
            - Serialize python model using pickle

        Note:
            Does not serialize state of Adam optimizer.
            Should not be used to save / restore a training model.
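
        Example (a minimal sketch; the folder name is hypothetical):

            model.save("models/my_classifier")  # the folder is created if it does not exist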
        """
        if path is None:
            return

        if isinstance(path, str):
            path = os.path.abspath(path)
        self.saver.save(self, path)

    def create_base_model(self, filename, exists_ok=False):
        """
        Saves the current weights into the correct file format to be used as a base model.
        :param filename: the path to save the base model relative to finetune's base model filestore.
        :param exists_ok: Whether to replace the model if it exists.
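
        Example (a hypothetical sketch; assumes the model has already been fit):

            model.create_base_model("my_base_model.jl", exists_ok=True)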
        """
        base_model_path = os.path.join(os.path.dirname(__file__), "model",
                                       filename)

        if not exists_ok and os.path.exists(base_model_path):
            base_model_path = base_model_path + str(int(time.time()))
            LOGGER.warning(
                "Cannot overwrite model {}; set exists_ok=True to overwrite. Saving as {} to avoid data loss."
                .format(filename, base_model_path))

        if not self.saver.variables:
            raise FinetuneError(
                "Cannot save a base model with no weights changed. Call fit before creating a base model."
            )
        weights_stripped = {
            k: v
            for k, v in self.saver.variables.items()
            if "featurizer" in k and "Adam" not in k
        }
        joblib.dump(weights_stripped, base_model_path)

    def load(path, *args, **kwargs):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        :param **kwargs: key-value pairs of config items to override.
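
        Example (a minimal sketch; `Classifier` and the path are hypothetical):

            model = Classifier.load("models/my_classifier", batch_size=4)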
        """
        if not isinstance(path, str) and not hasattr(path, "write"):
            instance = path
            raise FinetuneError(
                'The .load() method can only be called on the class, not on an instance. Try `{}.load("{}")` instead.'
                .format(instance.__class__.__name__, args[0]))

        assert_valid_config(**kwargs)

        saver = Saver()
        model = saver.load(path)

        # Backwards compatibility
        # Ensure old models get new default settings
        for setting, default in get_default_config().items():
            if not hasattr(model.config, setting):
                if setting == "add_eos_bos_to_chunk":
                    model.config.add_eos_bos_to_chunk = False
                else:
                    model.config.update({setting: default})

        model.config.update(kwargs)
        model.input_pipeline.config = model.config
        download_data_if_required(model.config.base_model)
        saver.set_fallback(model.config.base_model_path)
        model._initialize()
        model.saver.variables = saver.variables
        model._trained = True
        return model

    @classmethod
    def finetune_grid_search(cls,
                             Xs,
                             Y,
                             *,
                             test_size,
                             eval_fn=None,
                             probs=False,
                             return_all=False,
                             **kwargs):
        """
        Performs grid search over config items defined using "GridSearchable" objects and returns either the full
        results or the config object corresponding to the best result. The default config contains grid-searchable
        objects for the most important parameters to search over.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
         given then that fraction of samples is used.
        :param eval_fn: An eval function that takes 2 inputs (prediction, truth) and returns a float, with a max value being desired.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :param kwargs: Keyword arguments to pass to get_config()
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
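
        Example (a hypothetical sketch; `Classifier`, `texts`, `labels` and `accuracy_fn` are illustrative names):

            best_config = Classifier.finetune_grid_search(
                texts, labels, test_size=0.1, eval_fn=accuracy_fn
            )
            model = Classifier(config=best_config)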
        """
        if isinstance(Xs[0], str):
            Xs = [Xs]
        config = get_config(**kwargs)
        config.val_size = 0.0
        eval_fn = eval_fn or cls.get_eval_fn()

        trainXs, testXs, trainY, testY = train_test_split(list_transpose(Xs),
                                                          Y,
                                                          test_size=test_size,
                                                          shuffle=True)
        trainXs = list_transpose(trainXs)
        testXs = list_transpose(testXs)
        gs = config.get_grid_searchable()
        ranged_keys = gs.keys()
        ranged_iterators = gs.values()
        grid_gen = itertools.product(*ranged_iterators)
        results = []
        for grid_item in grid_gen:
            config_ = deepcopy(config)
            config_.update(dict(zip(ranged_keys, grid_item)))
            instance = cls(config=config_)
            instance.finetune(*trainXs, Y=trainY)
            if probs:
                res = instance.predict_proba(*testXs)
            else:
                res = instance.predict(*testXs)
            results.append((config_, eval_fn(res, testY)))
            del instance

        if return_all:
            return results
        return max(results, key=lambda x: x[1])[0]

    @classmethod
    def finetune_grid_search_cv(cls,
                                Xs,
                                Y,
                                *,
                                n_splits,
                                test_size,
                                eval_fn=None,
                                probs=False,
                                return_all=False,
                                **kwargs):
        """
        Performs cross-validated grid search over config items defined using "GridSearchable" objects and returns either
        the full results or the config object corresponding to the best result. The default config contains
        grid-searchable objects for the most important parameters to search over.

        Note that the CV splits are not guaranteed to be unique, but each split is evaluated against every set of hyperparameters.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param n_splits: Number of CV splits to do.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
            given then that fraction of samples is used.
        :param eval_fn: An eval function that takes 2 batches of outputs and returns a float, with a max value being
            desired. An arithmetic mean must make sense for this metric.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :param kwargs: Keyword arguments to pass to get_config()
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
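
        Example (a hypothetical sketch, following the same conventions as `finetune_grid_search`):

            best_config = Classifier.finetune_grid_search_cv(
                texts, labels, n_splits=3, test_size=0.1, eval_fn=accuracy_fn
            )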
        """
        results = []
        for _ in range(n_splits):
            res = cls.finetune_grid_search(Xs,
                                           Y,
                                           test_size=test_size,
                                           probs=probs,
                                           eval_fn=eval_fn,
                                           return_all=True,
                                           **kwargs)
            results.append(res)
        results = list(zip(*results))
        aggregated_results = []
        for configuration in results:
            config_common = None
            sum_res = 0
            n_res = 0
            for config, result in configuration:
                config_common = config_common or config
                assert config == config_common
                n_res += 1
                sum_res += result
            aggregated_results.append((config_common, sum_res / n_res))

        if return_all:
            return aggregated_results

        return max(aggregated_results, key=lambda x: x[1])[0]

    def process_long_sequence(self, X, context=None, **kwargs):
        arr_encoded = [
            self.input_pipeline._text_to_ids(x)
            for x in self.input_pipeline._format_for_inference(X)
        ]

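        # Each input document may have been chunked into several overlapping pieces;
        # flatten the per-document chunks and record which document each chunk came from.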
        flat_array_encoded = []
        sequence_id = []
        for i, ae in enumerate(arr_encoded):
            for sample in ae:
                flat_array_encoded.append(sample)
                sequence_id.append(i)

        labels, batch_probas = [], []
        for pred in self._inference(
                X,
                predict_keys=[PredictMode.PROBAS, PredictMode.NORMAL],
                n_examples=len(flat_array_encoded),
                context=context,
                **kwargs):
            normal_pred = pred[PredictMode.NORMAL]
            if not hasattr(self, 'multi_label'):
                normal_pred = np.expand_dims(normal_pred, 0)
            labels.append(
                self.input_pipeline.label_encoder.inverse_transform(
                    normal_pred))
            batch_probas.append(pred[PredictMode.PROBAS])

        if not batch_probas:
            batch_probas = [None] * len(labels)

        for chunk_idx, (label_seq,
                        proba_seq) in enumerate(zip(labels, batch_probas)):
            position_seq = flat_array_encoded[chunk_idx].char_locs
            start_of_doc = chunk_idx == 0 or sequence_id[
                chunk_idx - 1] != sequence_id[chunk_idx]
            end_of_doc = (chunk_idx + 1 == len(flat_array_encoded) or
                          sequence_id[chunk_idx] != sequence_id[chunk_idx + 1])
            yield position_seq, start_of_doc, end_of_doc, label_seq, proba_seq

    def __del__(self):
        if hasattr(self, "_tmp_dir") and self._tmp_dir is not None:
            self._tmp_dir.cleanup()
Example No. 12
class BaseModel(object, metaclass=ABCMeta):
    """
    A sklearn-style class for finetuning a Transformer language model on a classification task.
    """

    def __init__(self, config=None, **kwargs):
        """ 
        For a full list of configuration options, see `finetune.config`.
        
        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """
        tf.reset_default_graph()

        self.config = config or get_default_config()
        self.config.update(kwargs)

        if self.config.num_layers_trained != self.config.n_layer and self.config.train_embeddings:
            raise ValueError("If you are only finetuning a subset of the layers, you cannot finetune embeddings.")

        self.label_encoder = None
        self._initialize()
        self.target_dim = None
        self._load_from_file = False

    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        download_data_if_required()
        self.encoder = TextEncoder()

        # symbolic ops
        self.logits = None  # classification logits
        self.target_loss = None  # cross-entropy loss
        self.lm_losses = None  # language modeling losses
        self.lm_predict_op = None
        self.train = None  # gradient + parameter update
        self.features = None  # hidden representation fed to classifier
        self.summaries = None  # Tensorboard summaries
        self.train_writer = None
        self.valid_writer = None
        self.predict_params = None
        self.train_op = None
        self.predict_op = None
        self.predict_proba_op = None
        self.sess = None
        self.noop = tf.no_op()

        # indicator vars
        self.is_built = False  # has tf graph been constructed?
        self.is_trained = False  # has model been fine-tuned?
        self.require_lm = False

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = self.encoder.vocab_size
            word_embeddings = value[:vocab_size - len(self.encoder.special_tokens)]
            special_embed = value[len(word_embeddings): vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(positional_embed):
                positional_embed = interpolate_pos_embed(positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError("max_length cannot be greater than {} if interpolate_pos_embed is turned off".format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate((word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        self.saver = Saver(
            fallback_filename=JL_BASE,
            exclude_matches=None if self.config.save_adam_vars else "adam",
            variable_transforms=[process_embeddings]
        )

    def _format_for_encoding(self, Xs):
        """
        Most subclasses take in inputs as:
            List (batch) of list (docs)
        
        encode_multi_input expects the following format:
            List (batch) of list (docs) of list (subseqs) of text
        
        This method is responsible for standardizing inputs to the above format
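
        For example (illustrative input only):
            [["doc one", "doc two"]] -> [[["doc one"], ["doc two"]]]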
        """
        return [[[x] for x in X] for X in Xs]

    def _text_to_ids(self, Xs, Y=None, max_length=None):
        # Maps lists of text to formatted numpy arrays of token ids and loss-masks marking the lengths of the sequences.
        max_length = max_length or self.config.max_length

        # If 1d array of text is passed, coerce into multifield format
        if len(Xs) and isinstance(Xs[0], (bytes, str)):
            Xs = [[x] for x in Xs]
    
        Xs = self._format_for_encoding(Xs)
        
        if self.config.chunk_long_sequences and len(Xs[0]) == 1:
            # can only chunk single sequence inputs
            chunk_size = max_length - 2 
            step_size = chunk_size // 3
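            # e.g. with max_length=512: chunk_size = 510 and step_size = 170, so
            # consecutive chunks overlap by chunk_size - step_size = 340 tokens.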
            encoded = self.encoder.encode_multi_input(Xs, Y=Y, max_length=sys.maxsize)
            
            d = defaultdict(list)
            for idx in range(len(encoded.token_ids)):
                starts = list(range(0, len(encoded.token_ids[idx]), step_size))
                for start in starts:
                    end = start + chunk_size

                    for field in EncodedOutput._fields:
                        field_value = getattr(encoded, field)
                        if field_value is not None:
                            d[field].append(field_value[idx][start:end])

            encoder_out = EncodedOutput(**d)
            return self._array_format(encoder_out)
        else:
            encoder_out = self.encoder.encode_multi_input(Xs, Y=Y, max_length=max_length)
            return self._array_format(encoder_out)
        

    @abstractmethod
    def _predict_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _predict_proba_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _target_model(self, *, featurizer_state, targets, n_outputs, train=False, reuse=None, **kwargs):
        # Overridden by subclass to attach a target model onto the shared base featurizer.
        raise NotImplementedError

    @abstractmethod
    def _target_encoder(self):
        # Overridden by subclass to produce the right target encoding for a given target model.
        raise NotImplementedError

    def _eval(self, *tensors, feed_dict):
        """
        Evaluate the value of each of the provided tensors.
        Returns a `dict` that maps from tensor to result value.  
        If any result value is None, that result is excluded from the results `dict`.
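
        Example (a minimal sketch of typical usage inside this class):

            results = self._eval(self.target_loss, feed_dict=feed_dict)
            loss_value = results.get(self.target_loss)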
        """
        tensors = [
            tensor if tensor is not None else self.noop
            for tensor in tensors
        ]
        values = self.sess.run(tensors, feed_dict=feed_dict)
        return {
            tensor: value
            for tensor, value in zip(tensors, values)
            if value is not None
        }

    def finetune(self, Xs, Y=None, batch_size=None):
        arr_encoded = self._text_to_ids(Xs)
        return self._training_loop(
            arr_encoded,
            Y=Y,
            batch_size=batch_size,
        )

    def _training_loop(self, arr_encoded, Y=None, batch_size=None):
        self.label_encoder = self._target_encoder()

        idxs = list(range(len(arr_encoded.token_ids)))
        train_idxs, val_idxs = train_test_split(idxs, test_size=self.config.val_size)

        if Y is None:
            # only the language model will be trained; create placeholder targets of the right length
            train_Y = np.asarray([[]] * len(train_idxs))
            val_Y = np.asarray([[]] * len(val_idxs))
            target_dim = None
        else:
            Y = np.asarray(Y)
            train_Y = self.label_encoder.fit_transform(Y[train_idxs])
            val_Y = self.label_encoder.transform(Y[val_idxs])
            target_dim = self.label_encoder.target_dim

        batch_size = batch_size or self.config.batch_size
        n_batch_train = batch_size * max(len(self.config.visible_gpus), 1)
        n_examples = len(train_idxs)
        n_updates_total = (n_examples // n_batch_train) * self.config.n_epochs
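        # e.g. 1000 training examples, batch_size=8, 2 visible GPUs and 3 epochs:
        # n_batch_train = 16, so n_updates_total = (1000 // 16) * 3 = 186.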

        train_dataset = (arr_encoded.token_ids[train_idxs], arr_encoded.mask[train_idxs], train_Y)
        val_dataset = (arr_encoded.token_ids[val_idxs], arr_encoded.mask[val_idxs], val_Y)

        self._build_model(n_updates_total=n_updates_total, target_dim=target_dim)
        self.is_trained = True

        avg_train_loss = None
        avg_val_loss = None
        global_step = 0
        best_val_loss = float("inf")
        val_window = [float("inf")] * self.config.val_window_size

        for i in range(self.config.n_epochs):
            iterator = iter_data(
                *train_dataset, 
                n_batch=n_batch_train, 
                tqdm_desc="Epoch {}".format(i),
                verbose=self.config.verbose
            )
            for (xmb, mmb, ymb) in iterator:
                feed_dict = {
                    self.X: xmb,
                    self.M: mmb,
                }
                if target_dim:
                    feed_dict[self.Y] = ymb

                global_step += 1
                if global_step % self.config.val_interval == 0:
                    feed_dict[self.do_dropout] = DROPOUT_OFF

                    outputs = self._eval(self.summaries, feed_dict=feed_dict)
                    if self.train_writer is not None:
                        self.train_writer.add_summary(outputs.get(self.summaries), global_step)

                    sum_val_loss = 0
                    for xval, mval, yval in iter_data(*val_dataset, n_batch=n_batch_train, verbose=self.config.verbose,
                                                      tqdm_desc="Validation"):
                        feed_dict = {
                            self.X: xval,
                            self.M: mval,
                            self.do_dropout: DROPOUT_OFF
                        }
                        if target_dim:
                            feed_dict[self.Y] = yval

                        outputs = self._eval(self.target_loss, self.summaries, feed_dict=feed_dict)
                        if self.valid_writer is not None:
                            self.valid_writer.add_summary(outputs.get(self.summaries), global_step)

                        val_cost = outputs.get(self.target_loss, 0)
                        sum_val_loss += val_cost

                        if avg_val_loss is None:
                            avg_val_loss = val_cost
                        else:
                            avg_val_loss = (
                                    avg_val_loss * self.config.rolling_avg_decay
                                    + val_cost * (1 - self.config.rolling_avg_decay)
                            )
                    val_window.append(sum_val_loss)
                    val_window.pop(0)

                    if np.mean(val_window) <= best_val_loss:
                        best_val_loss = np.mean(val_window)
                        if self.config.autosave_path is not None:
                            self.save(self.config.autosave_path)

                    tqdm.tqdm.write("Train loss: {}\t Validation loss: {}".format(avg_train_loss, avg_val_loss))

                feed_dict[self.do_dropout] = DROPOUT_ON
                outputs = self._eval(self.target_loss, self.train_op, feed_dict=feed_dict)

                cost = outputs.get(self.target_loss, 0)
                if avg_train_loss is None:
                    avg_train_loss = cost
                else:
                    avg_train_loss = avg_train_loss * self.config.rolling_avg_decay + cost * (
                            1 - self.config.rolling_avg_decay)

        return self

    def fit(self, *args, **kwargs):
        """ An alias for finetune. """
        return self.finetune(*args, **kwargs)

    def _predict(self, Xs, max_length=None):
        predictions = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                output = self._eval(self.predict_op,
                    feed_dict={
                        self.X: xmb,
                        self.M: mmb,
                        self.do_dropout: DROPOUT_OFF
                    }
                )
                prediction = output.get(self.predict_op)
                formatted_predictions = self.label_encoder.inverse_transform(prediction)
                predictions.append(formatted_predictions)
        return np.concatenate(predictions).tolist()

    def predict(self, Xs, max_length=None):
        return self._predict(Xs, max_length=max_length)

    def _predict_proba(self, Xs, max_length=None):
        """
        Produce raw numeric outputs for proba predictions
        """
        predictions = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                output = self._eval(
                    self.predict_proba_op,
                    feed_dict={
                        self.X: xmb,
                        self.M: mmb,
                        self.do_dropout: DROPOUT_OFF
                    }
                )
                probas = output.get(self.predict_proba_op)
                predictions.extend(probas)
        return predictions

    def predict_proba(self, *args, **kwargs):
        """
        Produces a probability distribution over classes for each example.
        Returns a list of dicts mapping class labels to probabilities.
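
        Example output (illustrative class names only):

            [{"negative": 0.1, "positive": 0.9}, ...]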
        """
        raw_probas = self._predict_proba(*args, **kwargs)
        classes = self.label_encoder.classes_

        formatted_predictions = []
        for probas in raw_probas:
            formatted_predictions.append(
                dict(zip(classes, probas))
            )
        return formatted_predictions

    def _featurize(self, Xs, max_length=None):
        features = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                feature_batch = self.sess.run(self.features, {
                    self.X: xmb,
                    self.M: mmb,
                    self.do_dropout: DROPOUT_OFF
                })
                features.append(feature_batch)
        return np.concatenate(features)

    @abstractmethod
    def featurize(self, *args, **kwargs):
        """
        Base method to get raw features out of the model.
        These features are the same features that are fed into the target_model.
        """
        return self._featurize(*args, **kwargs)

    @classmethod
    def get_eval_fn(cls):
        raise NotImplementedError("No default eval function is defined; please pass an explicit eval_fn to grid_search")

    def transform(self, *args, **kwargs):
        """
        An alias for `featurize`.
        """
        return self.featurize(*args, **kwargs)

    def _infer_prep(self, Xs, max_length=None):
        max_length = max_length or self.config.max_length
        arr_encoded = self._text_to_ids(Xs, max_length=max_length)
        n_batch_train = self.config.batch_size * max(len(self.config.visible_gpus), 1)
        self._build_model(n_updates_total=0, target_dim=self.target_dim, train=False)
        yield from iter_data(arr_encoded.token_ids, arr_encoded.mask, n_batch=n_batch_train,
                             verbose=self.config.verbose)

    def _array_format(self, encoded_output):
        """
        Returns numpy array of token idxs and corresponding mask
        Returned `x` array contains two channels:
            0: byte-pair encoding embedding
            1: positional embedding
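
        For example, with max_length=512 and a batch of n sequences, `x` has
        shape [n, 512, 2] and `mask` has shape [n, 512].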
        """
        n = len(encoded_output.token_ids)
        seq_lengths = [len(x) for x in encoded_output.token_ids]
        x = np.zeros((n, self.config.max_length, 2), dtype=np.int32)
        mask = np.zeros((n, self.config.max_length), dtype=np.float32)
        labels_arr = np.full((n, self.config.max_length), PAD_TOKEN,
                             dtype='object') if encoded_output.labels is not None else None
        for i, seq_length in enumerate(seq_lengths):
            # BPE embedding
            x[i, :seq_length, 0] = encoded_output.token_ids[i]
            # masking: value of 1 means "consider this in cross-entropy LM loss"
            mask[i, 1:seq_length] = 1
            if encoded_output.labels:
                labels_arr[i, :seq_length] = encoded_output.labels[i]
        # positional_embeddings
        x[:, :, 1] = np.arange(self.encoder.vocab_size, self.encoder.vocab_size + self.config.max_length)

        return ArrayEncodedOutput(
            token_ids=x,
            tokens=encoded_output.tokens,
            labels=labels_arr,
            char_locs=encoded_output.char_locs,
            mask=mask,
        )

    def _compile_train_op(self, *, params, grads, n_updates_total):
        grads = average_grads(grads)

        if self.config.summarize_grads:
            self.summaries += tf.contrib.training.add_gradients_summaries(grads)

        grads = [grad for grad, param in grads]
        self.train_op = AdamWeightDecay(
            params=params,
            grads=grads,
            lr=self.config.lr,
            schedule=partial(schedules[self.config.lr_schedule], warmup=self.config.lr_warmup),
            t_total=n_updates_total,
            l2=self.config.l2_reg,
            max_grad_norm=self.config.max_grad_norm,
            vector_l2=self.config.vector_l2,
            b1=self.config.b1,
            b2=self.config.b2,
            e=self.config.epsilon,
            pretrained_weights=self.saver.get_pretrained_weights(),
            deviation_regularization=self.config.regularize_deviation
        )

    def _construct_graph(self, n_updates_total, target_dim=None, train=True):
        gpu_grads = []
        self.summaries = []

        # store whether or not graph was previously compiled with dropout
        self.train = train
        self._define_placeholders(target_dim=target_dim)

        aggregator = defaultdict(list)
        train_loss_tower = 0
        gpus = self.config.visible_gpus
        n_splits = max(len(gpus), 1)

        # In multi-GPU setups, using the CPU as a parameter server is most efficient unless the GPUs have direct interconnects.
        # With a single GPU there is no need for a separate parameter server, so parameters are placed on that GPU.
        params_device = 'cpu' if len(gpus) != 1 else gpus[0]

        # decide on setting for language model loss coefficient
        # if the language model loss does not contribute to overall loss,
        # remove the language model computation from the graph
        lm_loss_coef = self.config.lm_loss_coef
        if target_dim is None:
            lm_loss_coef = 1.0
        compile_lm = (train and lm_loss_coef > 0) or self.require_lm

        for i, (X, M, Y) in enumerate(soft_split(self.X, self.M, self.Y, n_splits=n_splits)):
            do_reuse = True if i > 0 else tf.AUTO_REUSE

            if gpus:
                device = tf.device(assign_to_gpu(gpus[i], params_device=params_device))
            else:
                device = tf.device('cpu')

            scope = tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse)

            with device, scope:
                featurizer_state = featurizer(
                    X,
                    config=self.config,
                    encoder=self.encoder,
                    dropout_placeholder=self.do_dropout,
                    train=train,
                    reuse=do_reuse
                )

                if compile_lm:
                    language_model_state = language_model(
                        X=X,
                        M=M,
                        config=self.config,
                        embed_weights=featurizer_state['embed_weights'],
                        hidden=featurizer_state['sequence_features'],
                        reuse=do_reuse
                    )

                    train_loss = lm_loss_coef * tf.reduce_mean(language_model_state['losses'])
                    aggregator['lm_losses'].append(language_model_state['losses'])
                    lm_logits = language_model_state["logits"]
                    aggregator["lm_model"].append(sample_with_temperature(lm_logits, self.config.lm_temp))
                else:
                    train_loss = 0

                aggregator['features'].append(featurizer_state['features'])

                if target_dim is not None:
                    with tf.variable_scope('model/target'):
                        target_model_state = self._target_model(
                            featurizer_state=featurizer_state,
                            targets=Y,
                            n_outputs=target_dim,
                            train=train,
                            reuse=do_reuse,
                            max_length=self.config.max_length
                        )
                    train_loss += (1 - lm_loss_coef) * tf.reduce_mean(target_model_state['losses'])
                    train_loss_tower += train_loss

                    aggregator['logits'].append(target_model_state['logits'])
                    aggregator['target_losses'].append(target_model_state['losses'])

                params = find_trainable_variables("model")
                grads = tf.gradients(train_loss, params)
                grads = list(zip(grads, params))
                gpu_grads.append(grads)

        with tf.device(params_device):
            self.features = tf.concat(aggregator['features'], axis=0)

            if compile_lm:
                self.lm_predict_op = tf.concat(aggregator["lm_model"], 0)
                self.lm_losses = tf.concat(aggregator['lm_losses'], axis=0)
                self.lm_loss = tf.reduce_mean(self.lm_losses)
                self.summaries.append(tf.summary.scalar('LanguageModelLoss', self.lm_loss))

            if train:
                self._compile_train_op(
                    params=params,
                    grads=gpu_grads,
                    n_updates_total=n_updates_total
                )

            if target_dim is not None:
                self.logits = tf.concat(aggregator['logits'], axis=0)
                self.target_losses = concat_or_stack(aggregator['target_losses'])

                self.predict_op = self._predict_op(
                    self.logits, **target_model_state.get("predict_params", {})
                )
                self.predict_proba_op = self._predict_proba_op(
                    self.logits, **target_model_state.get("predict_params", {})
                )
                self.target_loss = tf.reduce_mean(self.target_losses)

                self.summaries.append(tf.summary.scalar('TargetModelLoss', self.target_loss))
                self.summaries.append(tf.summary.scalar('TotalLoss', train_loss_tower / n_splits))

            self.summaries = tf.summary.merge(self.summaries) if self.summaries else self.noop

    def _build_model(self, n_updates_total, target_dim, train=True):
        """
        Construct tensorflow symbolic graph.
        """
        if not self.is_trained or train != self.train or self.target_dim != target_dim:
            # reconstruct graph to include/remove dropout
            # if `train` setting has changed
            self._construct_graph(n_updates_total, target_dim, train=train)

        self._initialize_session()
        self.saver.initialize(self.sess)

        self.target_dim = target_dim
        if train:
            if self.config.tensorboard_folder is not None:
                if not os.path.exists(self.config.tensorboard_folder):
                    os.mkdir(self.config.tensorboard_folder)
                self.train_writer = tf.summary.FileWriter(self.config.tensorboard_folder + '/train', self.sess.graph)
                self.valid_writer = tf.summary.FileWriter(self.config.tensorboard_folder + '/valid', self.sess.graph)
        self.is_built = True

    def _initialize_session(self):
        if self.sess is None:
            gpus = self.config.visible_gpus
            os.environ['CUDA_VISIBLE_DEVICES'] = ",".join([str(gpu) for gpu in gpus])
            conf = tf.ConfigProto(allow_soft_placement=self.config.soft_device_placement,
                                  log_device_placement=self.config.log_device_placement)
            self.sess = tf.Session(config=conf)

    def _set_random_seed(self, seed=None):
        seed = seed or self.config.seed
        random.seed(seed)
        np.random.seed(seed)
        tf.set_random_seed(seed)

    def _target_placeholder(self, target_dim=None):
        return tf.placeholder(tf.float32, [None, target_dim or 1])  # classification targets

    def _define_placeholders(self, target_dim=None):
        # tf placeholders
        self.X = tf.placeholder(tf.int32, [None, self.config.max_length, 2])  # token idxs (BPE embedding + positional)
        self.M = tf.placeholder(tf.float32, [None, self.config.max_length])  # sequence mask
        # when target dim is not set, an array of [None] targets is passed as a placeholder

        self.do_dropout = tf.placeholder(tf.float32)  # 1 for do dropout and 0 to not do dropout
        self.Y = self._target_placeholder(target_dim=target_dim)

    def generate_text(self, seed_text='', max_length=None):
        """
        Generates text from the language modelling objective given some seed text, using noisy greedy decoding.
        The temperature parameter for decoding is set in the config.

        :param max_length: The maximum length to decode to.
        :param seed_text: Defaults to the empty string. This will form the starting point to begin modelling
        :return: A string containing the generated text.
        """
        self.require_lm = True
        encoded = self.encoder._encode([seed_text])
        self._build_model(n_updates_total=0, target_dim=self.target_dim, train=False)
        string = [self.encoder['_start_']] + encoded.token_ids[0]
        EOS = self.encoder['_classify_']
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            for i in range(len(encoded.token_ids), (max_length or self.config.max_length) - 1):
                arr_encoded = self._array_format(encoded)
                class_idx = self.sess.run(self.lm_predict_op, {self.X: arr_encoded.token_ids, self.M: arr_encoded.mask})
                string.append(class_idx[i])
                if string[-1] == EOS:
                    break
        return self.encoder.decode(string)

    def __getstate__(self):
        """
        Leave serialization of all tf objects to tf
        """
        required_fields = [
            'label_encoder', 'target_dim', '_load_from_file', 'config', 'target_type',
        ]
        serialized_state = {
            k: v for k, v in self.__dict__.items()
            if k in required_fields
        }
        return serialized_state

    def save(self, path):
        """
        Saves the state of the model to disk to the folder specified by `path`.  If `path` does not exist, it will be auto-created.

        Save is performed in two steps:
            - Serialize tf graph to disk using tf.Saver
            - Serialize python model using pickle

        Note:
            Does not serialize state of Adam optimizer.
            Should not be used to save / restore a training model.
        """
        if path is None:
            return

        path = os.path.abspath(path)
        self.saver.save(self, path)
        self._load_from_file = False

    @classmethod
    def load(cls, path):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        """
        saver = Saver(JL_BASE)
        model = saver.load(path)
        model._initialize()
        model.saver.variables = saver.variables
        tf.reset_default_graph()
        return model
   
    @classmethod
    def finetune_grid_search(cls, Xs, Y, *, test_size, config=None, eval_fn=None, probs=False, return_all=False):
        """
        Performs grid search over config items defined using "GridSearchable" objects and returns either the full
        results or the config object corresponding to the best result. The default config contains grid-searchable
        objects for the most important parameters to search over.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
         given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 inputs (prediction, truth) and returns a float, with a max value being desired.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]

        """
        if isinstance(Xs[0], str):
            Xs = [Xs]
        config = config or get_default_config()
        config.val_size = 0.0
        eval_fn = eval_fn or cls.get_eval_fn()

        trainXs, testXs, trainY, testY = train_test_split(list_transpose(Xs), Y, test_size=test_size, shuffle=True)
        trainXs = list_transpose(trainXs)
        testXs = list_transpose(testXs)
        gs = config.get_grid_searchable()
        ranged_keys = gs.keys()
        ranged_iterators = gs.values()
        grid_gen = itertools.product(*ranged_iterators)
        results = []
        for grid_item in grid_gen:
            config_ = deepcopy(config)
            config_.update(dict(zip(ranged_keys, grid_item)))
            instance = cls(config=config_)
            instance.finetune(*trainXs, Y=trainY)
            if probs:
                res = instance.predict_proba(*testXs)
            else:
                res = instance.predict(*testXs)
            results.append((config_, eval_fn(res, testY)))
            del instance

        if return_all:
            return results
        return max(results, key=lambda x: x[1])[0]


    @classmethod
    def finetune_grid_search_cv(cls, Xs, Y, *, n_splits, test_size, config=None, eval_fn=None, probs=False, return_all=False):
        """
        Performs cross-validated grid search over config items defined using "GridSearchable" objects and returns either
        the full results or the config object corresponding to the best result. The default config contains
        grid-searchable objects for the most important parameters to search over.

        Note that the CV splits are not guaranteed to be unique, but each split is evaluated against every set of hyperparameters.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets, A list of targets, [num_samples] that correspond to each sample in Xs.
        :param n_splits: Number of CV splits to do.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
            given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 batches of outputs and returns a float, with a max value being
            desired. An arithmetic mean must make sense for this metric.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
        """
        results = []
        for _ in range(n_splits):
            res = cls.finetune_grid_search(Xs, Y, test_size=test_size, probs=probs, eval_fn=eval_fn, config=config,
                                           return_all=True)
            results.append(res)
        results = list(zip(*results))
        aggregated_results = []
        for configuration in results:
            config_common = None
            sum_res = 0
            n_res = 0
            for config, result in configuration:
                config_common = config_common or config
                assert config == config_common
                n_res += 1
                sum_res += result
            aggregated_results.append((config_common, sum_res/n_res))

        if return_all:
            return aggregated_results

        return max(aggregated_results, key=lambda x: x[1])[0]

    def __del__(self):
        try:
            if self.sess is not None:
                self.sess.close()
        except AttributeError:
            pass
Example No. 13
class BaseModel(object, metaclass=ABCMeta):
    """
    A sklearn-style class for finetuning a Transformer language model on a classification task.
    """
    def __init__(self, config=None, **kwargs):
        """ 
        For a full list of configuration options, see `finetune.config`.
        
        :param config: A config object generated by `finetune.config.get_config` or None (for default config).
        :param **kwargs: key-value pairs of config items to override.
        """
        tf.reset_default_graph()

        self.config = config or get_default_config()
        self.config.update(kwargs)

        if self.config.num_layers_trained != self.config.n_layer and self.config.train_embeddings:
            raise ValueError(
                "If you are only finetuning a subset of the layers, you cannot finetune embeddings."
            )

        self.label_encoder = None
        self._initialize()
        self.target_dim = None
        self._load_from_file = False

    def _initialize(self):
        # Initializes the non-serialized bits of the class.
        self._set_random_seed(self.config.seed)

        download_data_if_required()
        self.encoder = TextEncoder()

        # symbolic ops
        self.logits = None  # classification logits
        self.target_loss = None  # cross-entropy loss
        self.lm_losses = None  # language modeling losses
        self.lm_predict_op = None
        self.train = None  # gradient + parameter update
        self.features = None  # hidden representation fed to classifier
        self.summaries = None  # Tensorboard summaries
        self.train_writer = None
        self.valid_writer = None
        self.predict_params = None
        self.train_op = None
        self.predict_op = None
        self.predict_proba_op = None
        self.sess = None
        self.noop = tf.no_op()

        # indicator vars
        self.is_built = False  # has tf graph been constructed?
        self.is_trained = False  # has model been fine-tuned?
        self.require_lm = False

        def process_embeddings(name, value):
            if "/we:0" not in name:
                return value

            vocab_size = self.encoder.vocab_size
            word_embeddings = value[:vocab_size -
                                    len(self.encoder.special_tokens)]
            special_embed = value[len(word_embeddings):vocab_size]
            positional_embed = value[vocab_size:]
            if self.config.interpolate_pos_embed and self.config.max_length != len(
                    positional_embed):
                positional_embed = interpolate_pos_embed(
                    positional_embed, self.config.max_length)
            elif self.config.max_length > len(positional_embed):
                raise ValueError(
                    "Max Length cannot be greater than {} if interploate_pos_embed is turned off"
                    .format(len(positional_embed)))
            else:
                positional_embed = positional_embed[:self.config.max_length]

            embeddings = np.concatenate(
                (word_embeddings, special_embed, positional_embed), axis=0)
            return embeddings

        self.saver = Saver(
            fallback_filename=JL_BASE,
            exclude_matches=None if self.config.save_adam_vars else "adam",
            variable_transforms=[process_embeddings])

    def _format_for_encoding(self, Xs):
        """
        Most subclasses take in inputs as:
            List (batch) of list (docs)
        
        encode_multi_input expects the following format:
            List (batch) of list (docs) of list (subseqs) of text
        
        This method is responsible for standardizing inputs to the above format
        """
        return [[[x] for x in X] for X in Xs]

    def _text_to_ids(self, Xs, Y=None, max_length=None):
        # Maps lists of text to formatted numpy arrays of token ids and loss-masks marking the lengths of the sequences.
        max_length = max_length or self.config.max_length

        # If 1d array of text is passed, coerce into multifield format
        if len(Xs) and isinstance(Xs[0], (bytes, str)):
            Xs = [[x] for x in Xs]

        Xs = self._format_for_encoding(Xs)

        if self.config.chunk_long_sequences and len(Xs[0]) == 1:
            # can only chunk single sequence inputs
            chunk_size = max_length - 2
            step_size = chunk_size // 3
            encoded = self.encoder.encode_multi_input(Xs,
                                                      Y=Y,
                                                      max_length=sys.maxsize)

            d = defaultdict(list)
            for idx in range(len(encoded.token_ids)):
                starts = list(range(0, len(encoded.token_ids[idx]), step_size))
                for start in starts:
                    end = start + chunk_size

                    for field in EncodedOutput._fields:
                        field_value = getattr(encoded, field)
                        if field_value is not None:
                            d[field].append(field_value[idx][start:end])

            encoder_out = EncodedOutput(**d)
            return self._array_format(encoder_out)
        else:
            encoder_out = self.encoder.encode_multi_input(
                Xs, Y=Y, max_length=max_length)
            return self._array_format(encoder_out)

    @abstractmethod
    def _predict_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _predict_proba_op(self, logits, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def _target_model(self,
                      *,
                      featurizer_state,
                      targets,
                      n_outputs,
                      train=False,
                      reuse=None,
                      **kwargs):
        # Overridden by subclass to attach a target model onto the shared base featurizer.
        raise NotImplementedError

    @abstractmethod
    def _target_encoder(self):
        # Overridden by subclass to produce the right target encoding for a given target model.
        raise NotImplementedError

    def _eval(self, *tensors, feed_dict):
        """
        Evaluate the value of each of the provided tensors.
        Returns a `dict` that maps from tensor to result value.  
        If any result value is None, that result is excluded from the results `dict`.
        """
        tensors = [
            tensor if tensor is not None else self.noop for tensor in tensors
        ]
        values = self.sess.run(tensors, feed_dict=feed_dict)
        return {
            tensor: value
            for tensor, value in zip(tensors, values) if value is not None
        }

    def finetune(self, Xs, Y=None, batch_size=None):
        arr_encoded = self._text_to_ids(Xs)
        return self._training_loop(
            arr_encoded,
            Y=Y,
            batch_size=batch_size,
        )

    def _training_loop(self, arr_encoded, Y=None, batch_size=None):
        self.label_encoder = self._target_encoder()

        idxs = list(range(len(arr_encoded.token_ids)))
        train_idxs, val_idxs = train_test_split(idxs,
                                                test_size=self.config.val_size)

        if Y is None:
            # only the language model will be trained; create placeholder targets of the right length
            train_Y = np.asarray([[]] * len(train_idxs))
            val_Y = np.asarray([[]] * len(val_idxs))
            target_dim = None
        else:
            Y = np.asarray(Y)
            train_Y = self.label_encoder.fit_transform(Y[train_idxs])
            val_Y = self.label_encoder.transform(Y[val_idxs])
            target_dim = self.label_encoder.target_dim

        batch_size = batch_size or self.config.batch_size
        n_batch_train = batch_size * max(len(self.config.visible_gpus), 1)
        n_examples = len(train_idxs)
        n_updates_total = (n_examples // n_batch_train) * self.config.n_epochs

        train_dataset = (arr_encoded.token_ids[train_idxs],
                         arr_encoded.mask[train_idxs], train_Y)
        val_dataset = (arr_encoded.token_ids[val_idxs],
                       arr_encoded.mask[val_idxs], val_Y)

        self._build_model(n_updates_total=n_updates_total,
                          target_dim=target_dim)
        self.is_trained = True

        avg_train_loss = None
        avg_val_loss = None
        global_step = 0
        best_val_loss = float("inf")
        val_window = [float("inf")] * self.config.val_window_size

        for i in range(self.config.n_epochs):
            iterator = iter_data(*train_dataset,
                                 n_batch=n_batch_train,
                                 tqdm_desc="Epoch {}".format(i),
                                 verbose=self.config.verbose)
            for (xmb, mmb, ymb) in iterator:
                feed_dict = {
                    self.X: xmb,
                    self.M: mmb,
                }
                if target_dim:
                    feed_dict[self.Y] = ymb

                global_step += 1
                if global_step % self.config.val_interval == 0:
                    feed_dict[self.do_dropout] = DROPOUT_OFF

                    outputs = self._eval(self.summaries, feed_dict=feed_dict)
                    if self.train_writer is not None:
                        self.train_writer.add_summary(
                            outputs.get(self.summaries), global_step)

                    sum_val_loss = 0
                    for xval, mval, yval in iter_data(
                            *val_dataset,
                            n_batch=n_batch_train,
                            verbose=self.config.verbose,
                            tqdm_desc="Validation"):
                        feed_dict = {
                            self.X: xval,
                            self.M: mval,
                            self.do_dropout: DROPOUT_OFF
                        }
                        if target_dim:
                            feed_dict[self.Y] = yval

                        outputs = self._eval(self.target_loss,
                                             self.summaries,
                                             feed_dict=feed_dict)
                        if self.valid_writer is not None:
                            self.valid_writer.add_summary(
                                outputs.get(self.summaries), global_step)

                        val_cost = outputs.get(self.target_loss, 0)
                        sum_val_loss += val_cost

                        if avg_val_loss is None:
                            avg_val_loss = val_cost
                        else:
                            avg_val_loss = (
                                avg_val_loss * self.config.rolling_avg_decay +
                                val_cost * (1 - self.config.rolling_avg_decay))
                    val_window.append(sum_val_loss)
                    val_window.pop(0)

                    if np.mean(val_window) <= best_val_loss:
                        best_val_loss = np.mean(val_window)
                        if self.config.autosave_path is not None:
                            self.save(self.config.autosave_path)

                    tqdm.tqdm.write(
                        "Train loss: {}\t Validation loss: {}".format(
                            avg_train_loss, avg_val_loss))

                feed_dict[self.do_dropout] = DROPOUT_ON
                outputs = self._eval(self.target_loss,
                                     self.train_op,
                                     feed_dict=feed_dict)

                cost = outputs.get(self.target_loss, 0)
                if avg_train_loss is None:
                    avg_train_loss = cost
                else:
                    avg_train_loss = avg_train_loss * self.config.rolling_avg_decay + cost * (
                        1 - self.config.rolling_avg_decay)

        return self

    def fit(self, *args, **kwargs):
        """ An alias for finetune. """
        return self.finetune(*args, **kwargs)

    def _predict(self, Xs, max_length=None):
        predictions = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                output = self._eval(self.predict_op,
                                    feed_dict={
                                        self.X: xmb,
                                        self.M: mmb,
                                        self.do_dropout: DROPOUT_OFF
                                    })
                prediction = output.get(self.predict_op)
                formatted_predictions = self.label_encoder.inverse_transform(
                    prediction)
                predictions.append(formatted_predictions)
        return np.concatenate(predictions).tolist()

    def predict(self, Xs, max_length=None):
        """
        Predict labels for input text. Returns predictions in the format
        produced by the label encoder's inverse transform.
        """
        return self._predict(Xs, max_length=max_length)

    def _predict_proba(self, Xs, max_length=None):
        """
        Produce raw numeric outputs for proba predictions
        """
        predictions = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                output = self._eval(self.predict_proba_op,
                                    feed_dict={
                                        self.X: xmb,
                                        self.M: mmb,
                                        self.do_dropout: DROPOUT_OFF
                                    })
                probas = output.get(self.predict_proba_op)
                predictions.extend(probas)
        return predictions

    def predict_proba(self, *args, **kwargs):
        """
        The base method for predicting from the model.
        """
        raw_probas = self._predict_proba(*args, **kwargs)
        classes = self.label_encoder.classes_

        formatted_predictions = []
        for probas in raw_probas:
            formatted_predictions.append(dict(zip(classes, probas)))
        return formatted_predictions
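
    # Output sketch for predict_proba (hedged: the class names come from
    # whatever labels the label_encoder saw during finetuning):
    #
    #   model.predict_proba(["first text", "second text"])
    #   # -> [{"cat": 0.92, "dog": 0.08}, {"cat": 0.15, "dog": 0.85}]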

    def _featurize(self, Xs, max_length=None):
        features = []
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            max_length = max_length or self.config.max_length
            for xmb, mmb in self._infer_prep(Xs, max_length=max_length):
                feature_batch = self.sess.run(self.features, {
                    self.X: xmb,
                    self.M: mmb,
                    self.do_dropout: DROPOUT_OFF
                })
                features.append(feature_batch)
        return np.concatenate(features)

    @abstractmethod
    def featurize(self, *args, **kwargs):
        """
        Base method to get raw features out of the model.
        These are the same features that are fed into the target_model.
        """
        return self._featurize(*args, **kwargs)

    @classmethod
    def get_eval_fn(cls):
        raise NotImplementedError(
            "No default eval function is given, please pass an explicit eval fn to grid_search"
        )

    def transform(self, *args, **kwargs):
        """
        An alias for `featurize`.
        """
        return self.featurize(*args, **kwargs)
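
    # Featurization sketch (hedged: the feature dimension depends on the base
    # model configuration):
    #
    #   feats = model.featurize(["some text"])   # np.ndarray, shape [1, feature_dim]
    #   feats = model.transform(["some text"])   # same output via the alias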

    def _infer_prep(self, Xs, max_length=None):
        max_length = max_length or self.config.max_length
        arr_encoded = self._text_to_ids(Xs, max_length=max_length)
        n_batch_train = self.config.batch_size * max(
            len(self.config.visible_gpus), 1)
        self._build_model(n_updates_total=0,
                          target_dim=self.target_dim,
                          train=False)
        yield from iter_data(arr_encoded.token_ids,
                             arr_encoded.mask,
                             n_batch=n_batch_train,
                             verbose=self.config.verbose)

    def _array_format(self, encoded_output):
        """
        Returns numpy arrays of token idxs and the corresponding mask
        Returned `x` array contains two channels:
            0: byte-pair encoding embedding
            1: positional embedding
        """
        n = len(encoded_output.token_ids)
        seq_lengths = [len(x) for x in encoded_output.token_ids]
        x = np.zeros((n, self.config.max_length, 2), dtype=np.int32)
        mask = np.zeros((n, self.config.max_length), dtype=np.float32)
        if encoded_output.labels is not None:
            labels_arr = np.full((n, self.config.max_length),
                                 PAD_TOKEN,
                                 dtype='object')
        else:
            labels_arr = None
        for i, seq_length in enumerate(seq_lengths):
            # BPE embedding
            x[i, :seq_length, 0] = encoded_output.token_ids[i]
            # masking: value of 1 means "consider this in cross-entropy LM loss"
            mask[i, 1:seq_length] = 1
            if encoded_output.labels is not None:
                labels_arr[i, :seq_length] = encoded_output.labels[i]
        # positional embeddings
        x[:, :, 1] = np.arange(self.encoder.vocab_size,
                               self.encoder.vocab_size + self.config.max_length)

        return ArrayEncodedOutput(
            token_ids=x,
            tokens=encoded_output.tokens,
            labels=labels_arr,
            char_locs=encoded_output.char_locs,
            mask=mask,
        )
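
    # Worked example of the layout built by _array_format (illustrative numbers
    # only): with vocab_size=100, max_length=4 and token_ids=[[7, 8]]:
    #
    #   x[0, :, 0] == [7, 8, 0, 0]          # channel 0: BPE token ids, zero padded
    #   x[0, :, 1] == [100, 101, 102, 103]  # channel 1: position ids, offset by vocab_size
    #   mask[0]    == [0., 1., 0., 0.]      # position 0 is excluded from the LM loss
    #
    # The offset position ids index the block of the shared embedding matrix
    # that sits directly after the token vocabulary.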

    def _compile_train_op(self, *, params, grads, n_updates_total):
        grads = average_grads(grads)

        if self.config.summarize_grads:
            self.summaries += tf.contrib.training.add_gradients_summaries(
                grads)

        grads = [grad for grad, param in grads]
        self.train_op = AdamWeightDecay(
            params=params,
            grads=grads,
            lr=self.config.lr,
            schedule=partial(schedules[self.config.lr_schedule],
                             warmup=self.config.lr_warmup),
            t_total=n_updates_total,
            l2=self.config.l2_reg,
            max_grad_norm=self.config.max_grad_norm,
            vector_l2=self.config.vector_l2,
            b1=self.config.b1,
            b2=self.config.b2,
            e=self.config.epsilon,
            pretrained_weights=self.saver.get_pretrained_weights(),
            deviation_regularization=self.config.regularize_deviation)
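
    # Shape of the learning rate schedule passed above (a hedged illustration of
    # the warmup-then-decay idea, not the library's exact schedule function):
    #
    #   def warmup_then_linear_decay(x, warmup=0.002):  # x = step / t_total
    #       if x <= warmup:
    #           return x / warmup                       # linear ramp up
    #       return (1.0 - x) / (1.0 - warmup)           # linear decay to zero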

    def _construct_graph(self, n_updates_total, target_dim=None, train=True):
        gpu_grads = []
        self.summaries = []

        # record whether the graph was compiled in train mode (i.e. with dropout)
        self.train = train
        self._define_placeholders(target_dim=target_dim)

        aggregator = defaultdict(list)
        train_loss_tower = 0
        gpus = self.config.visible_gpus
        n_splits = max(len(gpus), 1)

        # Multi-GPU: using the CPU as a parameter server is most efficient
        # unless the system has direct GPU-to-GPU connections. With a single
        # GPU there is no need for a separate parameter server.
        params_device = 'cpu' if len(gpus) != 1 else gpus[0]

        # Decide on the language model loss coefficient. With no target model
        # (target_dim is None) the objective is pure language modelling, so the
        # coefficient is forced to 1.0. If the language model loss would not
        # contribute to the overall loss, the language model computation is
        # left out of the graph entirely.
        lm_loss_coef = self.config.lm_loss_coef
        if target_dim is None:
            lm_loss_coef = 1.0
        compile_lm = (train and lm_loss_coef > 0) or self.require_lm

        for i, (X, M, Y) in enumerate(
                soft_split(self.X, self.M, self.Y, n_splits=n_splits)):
            do_reuse = True if i > 0 else tf.AUTO_REUSE

            if gpus:
                device = tf.device(
                    assign_to_gpu(gpus[i], params_device=params_device))
            else:
                device = tf.device('cpu')

            scope = tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse)

            with device, scope:
                featurizer_state = featurizer(
                    X,
                    config=self.config,
                    encoder=self.encoder,
                    dropout_placeholder=self.do_dropout,
                    train=train,
                    reuse=do_reuse)

                if compile_lm:
                    language_model_state = language_model(
                        X=X,
                        M=M,
                        config=self.config,
                        embed_weights=featurizer_state['embed_weights'],
                        hidden=featurizer_state['sequence_features'],
                        reuse=do_reuse)

                    train_loss = lm_loss_coef * tf.reduce_mean(
                        language_model_state['losses'])
                    aggregator['lm_losses'].append(
                        language_model_state['losses'])
                    lm_logits = language_model_state["logits"]
                    aggregator["lm_model"].append(
                        sample_with_temperature(lm_logits,
                                                self.config.lm_temp))
                else:
                    train_loss = 0

                aggregator['features'].append(featurizer_state['features'])

                if target_dim is not None:
                    with tf.variable_scope('model/target'):
                        target_model_state = self._target_model(
                            featurizer_state=featurizer_state,
                            targets=Y,
                            n_outputs=target_dim,
                            train=train,
                            reuse=do_reuse,
                            max_length=self.config.max_length)
                    train_loss += (1 - lm_loss_coef) * tf.reduce_mean(
                        target_model_state['losses'])
                    train_loss_tower += train_loss

                    aggregator['logits'].append(target_model_state['logits'])
                    aggregator['target_losses'].append(
                        target_model_state['losses'])

                params = find_trainable_variables("model")
                grads = tf.gradients(train_loss, params)
                grads = list(zip(grads, params))
                gpu_grads.append(grads)

        with tf.device(params_device):
            self.features = tf.concat(aggregator['features'], axis=0)

            if compile_lm:
                self.lm_predict_op = tf.concat(aggregator["lm_model"], 0)
                self.lm_losses = tf.concat(aggregator['lm_losses'], axis=0)
                self.lm_loss = tf.reduce_mean(self.lm_losses)
                self.summaries.append(
                    tf.summary.scalar('LanguageModelLoss', self.lm_loss))

            if train:
                self._compile_train_op(params=params,
                                       grads=gpu_grads,
                                       n_updates_total=n_updates_total)

            if target_dim is not None:
                self.logits = tf.concat(aggregator['logits'], axis=0)
                self.target_losses = concat_or_stack(
                    aggregator['target_losses'])

                self.predict_op = self._predict_op(
                    self.logits,
                    **target_model_state.get("predict_params", {}))
                self.predict_proba_op = self._predict_proba_op(
                    self.logits,
                    **target_model_state.get("predict_params", {}))
                self.target_loss = tf.reduce_mean(self.target_losses)

                self.summaries.append(
                    tf.summary.scalar('TargetModelLoss', self.target_loss))
                self.summaries.append(
                    tf.summary.scalar('TotalLoss',
                                      train_loss_tower / n_splits))

            self.summaries = tf.summary.merge(
                self.summaries) if self.summaries else self.noop

    def _build_model(self, n_updates_total, target_dim, train=True):
        """
        Construct tensorflow symbolic graph.
        """
        if not self.is_trained or train != self.train or self.target_dim != target_dim:
            # reconstruct graph to include/remove dropout
            # if `train` setting has changed
            self._construct_graph(n_updates_total, target_dim, train=train)

        self._initialize_session()
        self.saver.initialize(self.sess)

        self.target_dim = target_dim
        if train:
            if self.config.tensorboard_folder is not None:
                # makedirs handles nested paths and avoids a race between the
                # existence check and directory creation
                os.makedirs(self.config.tensorboard_folder, exist_ok=True)
                self.train_writer = tf.summary.FileWriter(
                    self.config.tensorboard_folder + '/train', self.sess.graph)
                self.valid_writer = tf.summary.FileWriter(
                    self.config.tensorboard_folder + '/valid', self.sess.graph)
        self.is_built = True

    def _initialize_session(self):
        if self.sess is None:
            gpus = self.config.visible_gpus
            os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(
                [str(gpu) for gpu in gpus])
            conf = tf.ConfigProto(
                allow_soft_placement=self.config.soft_device_placement,
                log_device_placement=self.config.log_device_placement)
            self.sess = tf.Session(config=conf)

    def _set_random_seed(self, seed=None):
        seed = seed or self.config.seed
        random.seed(seed)
        np.random.seed(seed)
        tf.set_random_seed(seed)

    def _target_placeholder(self, target_dim=None):
        return tf.placeholder(
            tf.float32, [None, target_dim or 1])  # classification targets

    def _define_placeholders(self, target_dim=None):
        # tf placeholders
        self.X = tf.placeholder(tf.int32,
                                [None, self.config.max_length, 2
                                 ])  # token idxs (BPE embedding + positional)
        self.M = tf.placeholder(
            tf.float32, [None, self.config.max_length])  # sequence mask
        self.do_dropout = tf.placeholder(
            tf.float32)  # 1 to apply dropout, 0 to disable it
        # when target_dim is not set, a [None, 1] placeholder of targets is still created
        self.Y = self._target_placeholder(target_dim=target_dim)
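
    # Feed shapes implied by the placeholders above (hedged sketch; b is an
    # arbitrary batch size):
    #
    #   feed_dict = {
    #       self.X: np.zeros((b, max_length, 2), dtype=np.int32),      # tokens + positions
    #       self.M: np.zeros((b, max_length), dtype=np.float32),       # LM loss mask
    #       self.Y: np.zeros((b, target_dim or 1), dtype=np.float32),  # targets
    #       self.do_dropout: DROPOUT_ON,                               # or DROPOUT_OFF
    #   }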

    def generate_text(self, seed_text='', max_length=None):
        """
        Performs a prediction on the Language modeling objective given some seed text. It uses a noisy greedy decoding.
        Temperature parameter for decoding is set in the config.

        :param max_length: The maximum length to decode to.
        :param seed_text: Defaults to the empty string. This will form the starting point to begin modelling
        :return: A string containing the generated text.
        """
        self.require_lm = True
        encoded = self.encoder._encode([seed_text])
        self._build_model(n_updates_total=0,
                          target_dim=self.target_dim,
                          train=False)
        string = [self.encoder['_start_']] + encoded.token_ids[0]
        EOS = self.encoder['_classify_']
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            for i in range(len(encoded.token_ids[0]),
                           (max_length or self.config.max_length) - 1):
                # Feed the full running sequence back in so previously generated
                # tokens condition the next prediction (assumes `EncodedOutput`,
                # the encoder's output container, is importable here).
                arr_encoded = self._array_format(
                    EncodedOutput(token_ids=[string]))
                class_idx = self.sess.run(self.lm_predict_op, {
                    self.X: arr_encoded.token_ids,
                    self.M: arr_encoded.mask
                })
                string.append(class_idx[i])
                if string[-1] == EOS:
                    break
        return self.encoder.decode(string)
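
    # Generation sketch (hedged: assumes an instance of a concrete subclass,
    # trained or freshly initialised from the base language model):
    #
    #   text = model.generate_text(seed_text="The weather today", max_length=64)
    #   # samples with temperature config.lm_temp; stops at the _classify_
    #   # token or after max_length - 1 positions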

    def __getstate__(self):
        """
        Leave serialization of all tf objects to tf
        """
        required_fields = [
            'label_encoder',
            'target_dim',
            '_load_from_file',
            'config',
            'target_type',
        ]
        serialized_state = {
            k: v
            for k, v in self.__dict__.items() if k in required_fields
        }
        return serialized_state

    def save(self, path):
        """
        Saves the state of the model to disk to the folder specified by `path`.  If `path` does not exist, it will be auto-created.

        Save is performed in two steps:
            - Serialize tf graph to disk using tf.Saver
            - Serialize python model using pickle

        Note:
            Does not serialize state of Adam optimizer.
            Should not be used to save / restore a training model.
        """
        if path is None:
            return

        path = os.path.abspath(path)
        self.saver.save(self, path)
        self._load_from_file = False

    @classmethod
    def load(cls, path):
        """
        Load a saved fine-tuned model from disk.  Path provided should be a folder which contains .pkl and tf.Saver() files

        :param path: string path name to load model from.  Same value as previously provided to :meth:`save`. Must be a folder.
        """
        saver = Saver(JL_BASE)
        model = saver.load(path)
        model._initialize()
        model.saver.variables = saver.variables
        tf.reset_default_graph()
        return model
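
    # Save/load round trip (hedged sketch; the folder path and subclass name
    # are illustrative):
    #
    #   model.save("./my_model")              # writes the .pkl + tf.Saver() files
    #   restored = Classifier.load("./my_model")
    #   restored.predict(["some text"])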

    @classmethod
    def finetune_grid_search(cls,
                             Xs,
                             Y,
                             *,
                             test_size,
                             config=None,
                             eval_fn=None,
                             probs=False,
                             return_all=False):
        """
        Performs grid search over config items defined using "GridSearchable" objects and returns either full results or
        the config object that relates to the best results. The default config contains grid searchable objects for the
        most important parameters to search over.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets. A list of targets of shape [num_samples], one per sample in Xs.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
         given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 inputs (prediction, truth) and returns a float, where a higher value is better.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]

        """
        if isinstance(Xs[0], str):
            Xs = [Xs]
        config = config or get_default_config()
        config.val_size = 0.0
        eval_fn = eval_fn or cls.get_eval_fn()

        trainXs, testXs, trainY, testY = train_test_split(list_transpose(Xs),
                                                          Y,
                                                          test_size=test_size,
                                                          shuffle=True)
        trainXs = list_transpose(trainXs)
        testXs = list_transpose(testXs)
        gs = config.get_grid_searchable()
        ranged_keys = gs.keys()
        ranged_iterators = gs.values()
        grid_gen = itertools.product(*ranged_iterators)
        results = []
        for grid_item in grid_gen:
            config_ = deepcopy(config)
            config_.update(dict(zip(ranged_keys, grid_item)))
            instance = cls(config=config_)
            instance.finetune(*trainXs, Y=trainY)
            if probs:
                res = instance.predict_proba(*testXs)
            else:
                res = instance.predict(*testXs)
            results.append((config_, eval_fn(res, testY)))
            del instance

        if return_all:
            return results
        return max(results, key=lambda x: x[1])[0]
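
    # Grid search sketch (hedged: the GridSearchable signature and the values
    # shown here are assumptions, not necessarily the library defaults):
    #
    #   config = get_default_config()
    #   config.lr = GridSearchable(6.25e-5, [6.25e-4, 6.25e-5, 6.25e-6])
    #   best_config = Classifier.finetune_grid_search(
    #       trainX, trainY, test_size=0.1, config=config,
    #       eval_fn=lambda pred, true: np.mean(np.asarray(pred) == np.asarray(true)))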

    @classmethod
    def finetune_grid_search_cv(cls,
                                Xs,
                                Y,
                                *,
                                n_splits,
                                test_size,
                                config=None,
                                eval_fn=None,
                                probs=False,
                                return_all=False):
        """
        Performs cross validated grid search over config items defined using "GridSearchable" objects and returns either full results or
        the config object that relates to the best results. The default config contains grid searchable objects for the
        most important parameters to search over.

        Note that the CV splits are not guaranteed to be unique, but each split is evaluated against every set of hyperparameters.

        :param Xs: Input text. Either [num_samples] or [sequence, num_samples] for single or multi input models respectively.
        :param Y: Targets. A list of targets of shape [num_samples], one per sample in Xs.
        :param n_splits: Number of CV splits to do.
        :param test_size: Int or float. If an int is given this number of samples is used to validate, if a float is
            given then that fraction of samples is used.
        :param config: A config object, or None to use the default config.
        :param eval_fn: An eval function that takes 2 batches of outputs and returns a float, where a higher value is
            better. Taking an arithmetic mean must make sense for this metric.
        :param probs: If true, eval_fn is passed probability outputs from predict_proba, otherwise the output of predict is used.
        :param return_all: If True, all results are returned, if False, only the best config is returned.
        :return: default is to return the best config object. If return_all is true, it returns a list of tuples of the
            form [(config, eval_fn output), ... ]
        """
        results = []
        for _ in range(n_splits):
            res = cls.finetune_grid_search(Xs,
                                           Y,
                                           test_size=test_size,
                                           probs=probs,
                                           eval_fn=eval_fn,
                                           config=config,
                                           return_all=True)
            results.append(res)
        results = list(zip(*results))
        aggregated_results = []
        for configuration in results:
            config_common = None
            sum_res = 0
            n_res = 0
            for config, result in configuration:
                config_common = config_common or config
                assert config == config_common
                n_res += 1
                sum_res += result
            aggregated_results.append((config_common, sum_res / n_res))

        if return_all:
            return aggregated_results

        return max(aggregated_results, key=lambda x: x[1])[0]
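
    # Cross-validated variant (hedged sketch): same call as above plus n_splits;
    # eval_fn results are arithmetically averaged across splits for each config:
    #
    #   best_config = Classifier.finetune_grid_search_cv(
    #       trainX, trainY, n_splits=3, test_size=0.1, config=config,
    #       eval_fn=lambda pred, true: np.mean(np.asarray(pred) == np.asarray(true)))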

    def __del__(self):
        try:
            if self.sess is not None:
                self.sess.close()
        except AttributeError:
            pass