def warmup_predictor(nn_model, prediction_mode):
    # For the reranking modes, construct the predictor up front (at warm-up time)
    # so this setup cost is not paid on the first prediction request.
    if prediction_mode in {
            PREDICTION_MODES.beamsearch_reranking,
            PREDICTION_MODES.sampling_reranking
    }:
        prediction_config = PredictionConfig(prediction_mode)
        predictor_factory(nn_model, prediction_mode,
                          prediction_config.get_options_dict())
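
A minimal usage sketch: assuming nn_model and PREDICTION_MODES come from the surrounding CakeChat-style code as above, the hypothetical init_service hook below warms the predictor up once at startup so the first user request does not pay the setup cost.

def init_service(nn_model):
    # Hypothetical startup hook: pre-build the reranking predictor
    # before the service starts handling prediction requests.
    warmup_predictor(nn_model, PREDICTION_MODES.beamsearch_reranking)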
Example #2
def get_nn_response_ids(context_token_ids,
                        nn_model,
                        mode,
                        output_candidates_num=1,
                        output_seq_len=MAX_PREDICTIONS_LENGTH,
                        condition_ids=None,
                        **kwargs):
    """
    Predicts several responses for every context.

    :param context_token_ids: np.array; shape (batch_size, context_size, context_len); dtype=int
        Represents all tokens ids to use for predicting
    :param nn_model: CakeChatModel
    :param mode: one of PREDICTION_MODES mode
    :param output_candidates_num: Number of candidates to generate.
        When mode is either 'beamsearch', 'beamsearch-reranking'  or 'sampling-reranking', the candidates with the
        highest score are returned. When mode is 'sampling', the candidates_num of samples are generated independently.
    :param condition_ids: List with ids of conditions responding for each context.
    :param output_seq_len: Number of tokens to generate.
    :param kwargs: Other prediction parameters, passed into predictor constructor.
        Might be different depending on mode. See PredictionConfig for the details.
    :return: np.array; shape (batch_size, output_candidates_num, output_seq_len); dtype=int
        Generated predictions.
    """
    if mode == PREDICTION_MODES.sampling:
        kwargs['samples_num'] = output_candidates_num

    prediction_config = PredictionConfig(mode, **kwargs)
    _logger.debug('Generating predicted response for the following params: %s', prediction_config)

    predictor = predictor_factory(nn_model, mode, prediction_config.get_options_dict())
    responses = predictor.predict_responses(context_token_ids, output_seq_len, condition_ids, output_candidates_num)

    return responses
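
A hedged usage sketch, assuming a trained CakeChatModel named nn_model and that PREDICTION_MODES is available from the surrounding codebase; the context array below is a placeholder with the shape described in the docstring.

import numpy as np

# One dialog context of three utterances, padded to 30 token ids each
# (zeros stand in for real token ids produced by the tokenizer).
context_token_ids = np.zeros((1, 3, 30), dtype=np.int32)

response_ids = get_nn_response_ids(
    context_token_ids,
    nn_model,                        # trained CakeChatModel
    mode=PREDICTION_MODES.sampling,  # draw candidates by independent sampling
    output_candidates_num=5)         # expected shape: (1, 5, output_seq_len)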
Example #3
import numpy as np


def get_nn_response_ids(context_token_ids,
                        nn_model,
                        mode,
                        output_candidates_num=1,
                        output_seq_len=MAX_PREDICTIONS_LENGTH,
                        condition_ids=None,
                        **kwargs):
    """
    Predicts several responses for every context.

    :param context_token_ids: np.array; shape=(batch_size x context_size x context_len); dtype=int
        Represents all tokens ids to use for predicting
    :param nn_model: CakeChatModel
    :param mode: one of PREDICTION_MODES mode
    :param output_candidates_num: Number of candidates to generate.
        When mode is either 'beamsearch', 'beamsearch-reranking'  or 'sampling-reranking', the candidates with the
        highest score are returned. When mode is 'sampling', the candidates_num of samples are generated independently.
    :param condition_ids: List with ids of conditions responding for each context.
    :param output_seq_len: Number of tokens to generate.
    :param kwargs: Other prediction parameters, passed into predictor constructor.
        Might be different depending on mode. See PredictionConfig for the details.
    :return: np.array; shape=(responses_num x output_candidates_num x output_seq_len); dtype=int
        Generated predictions.
    """
    if mode == PREDICTION_MODES.sampling:
        kwargs['samples_num'] = output_candidates_num

    prediction_config = PredictionConfig(mode, **kwargs)
    _logger.debug('Generating predicted response for the following params: %s', prediction_config)

    predictor = predictor_factory(nn_model, mode, prediction_config.get_options_dict())
    return np.array(
        predictor.predict_responses(context_token_ids, output_seq_len, condition_ids, output_candidates_num))
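
Another hedged sketch under the same assumptions, this time using the reranked beam-search mode and per-context condition ids (the id values are placeholders):

import numpy as np

contexts = np.zeros((2, 3, 30), dtype=np.int32)  # batch of two placeholder contexts
condition_ids = [0, 1]                           # one condition id per context (placeholders)

best_responses = get_nn_response_ids(
    contexts,
    nn_model,
    mode=PREDICTION_MODES.beamsearch_reranking,
    output_candidates_num=1,
    condition_ids=condition_ids)
# np.array of shape (2, 1, output_seq_len): the top-ranked response per context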
Example #4
def warmup_predictor(nn_model, prediction_mode):
    # Same warm-up as above: construct the reranking predictor before
    # the first prediction request rather than during it.
    if prediction_mode in {PREDICTION_MODES.beamsearch_reranking, PREDICTION_MODES.sampling_reranking}:
        prediction_config = PredictionConfig(prediction_mode)
        predictor_factory(nn_model, prediction_mode, prediction_config.get_options_dict())