Code example #1
0
    def generate(self,
                 context,
                 prompt='',
                 temperature=None,
                 top_p=None,
                 top_k=None,
                 repetition_penalty=None,
                 depth=0):
        """Generate text from the model and post-process it, retrying on empty output.

        Args:
            context: model context passed to ``generate_raw``.
            prompt: extra prompt text; at least one of ``prompt``/``context``
                must be non-empty (asserted below).
            temperature, top_p, top_k, repetition_penalty: sampling settings;
                all are required despite defaulting to ``None``.
            depth: current retry depth; callers normally leave this at 0.

        Returns:
            The formatted result string. May be empty if the model produced
            empty output ``depth`` >= 20 times in a row.
        """
        # Sampling parameters are mandatory. Kept as asserts (not raises) to
        # preserve the original failure mode (AssertionError) for callers.
        assert (top_k is not None)
        assert (temperature is not None)
        # NOTE(review): truthiness rejects top_p == 0 / repetition_penalty == 0
        # as well as None — presumably intentional since 0 is not usable here.
        assert (top_p)
        assert (repetition_penalty)
        assert (prompt + context)

        text = self.generate_raw(context,
                                 prompt,
                                 temperature=temperature,
                                 top_k=top_k,
                                 top_p=top_p,
                                 repetition_penalty=repetition_penalty,
                                 stop_tokens=torch.tensor(
                                     [[self.tokenizer.eos_token_id]]))

        logger.debug("Generated result is: `%r`", repr(text))

        result = self.result_replace(text)

        if (depth > 6) and len(result) == 0:
            # Sometimes the model keeps generating a story starting with an
            # action (">"). If it has been retried a few times and this keeps
            # happening, let it keep action text which starts with ">".
            # We could just blacklist that token and force it to generate
            # something else. TODO
            result = self.result_replace(text, allow_action=True)
            logger.info(
                "Model generated empty text after formatting `%r`. Trying to format less with allow_action=True. `%r`",
                text,
                result,
            )

        if len(result) == 0:
            if depth < 20:
                logger.info("Model generated empty text trying again %r",
                            depth)
                # BUG FIX: the original passed (prompt, context) positionally,
                # swapping the two arguments on every retry. Pass them in
                # signature order so retries use the same inputs.
                return self.generate(context,
                                     prompt,
                                     temperature=temperature,
                                     top_p=top_p,
                                     top_k=top_k,
                                     repetition_penalty=repetition_penalty,
                                     depth=depth + 1)
            else:
                # logger.warn is deprecated; logger.warning is the real API.
                logger.warning(
                    "Model generated empty text %r times. Try another action",
                    depth)
        return result
Code example #2
0
    def generate(self, prompt, options=None, seed=None, depth=0):
        """Generate text for *prompt* and post-process it, retrying on empty output.

        Args:
            prompt: iterable of prompt strings; each element is passed through
                ``prompt_replace`` before generation.
            options: opaque options object. NOTE(review): not consumed in this
                body — only forwarded on retry; presumably read by an override
                or by ``generate_raw`` elsewhere. Confirm before removing.
            seed: NOTE(review): accepted but never read here; on retry it is
                overwritten with ``depth``. Confirm intended semantics.
            depth: current retry depth; callers normally leave this at 0.

        Returns:
            The formatted result string. May be empty if the model produced
            empty output ``depth`` >= 20 times in a row.
        """
        logger.debug("BEFORE PROMPT_REPLACE: `%r`", prompt)

        prompt = [self.prompt_replace(p) for p in prompt]

        text = self.generate_raw(prompt,
                                 stop_tokens=self.tokenizer.encode(
                                     ["<|endoftext|>", ">"]))

        logger.debug("Generated result is: `%r`", repr(text))

        result = self.result_replace(text)

        if (depth > 6) and len(result) == 0:
            # Sometimes the model keeps generating a story starting with an
            # action (">"). If it has been retried a few times and this keeps
            # happening, let it keep action text which starts with ">".
            result = self.result_replace(text, allow_action=True)
            logger.info(
                "Model generated empty text after formatting `%r`. Trying to format less with allow_action=True. `%r`",
                text,
                result,
            )

        if len(result) == 0:
            if depth < 20:
                logger.info("Model generated empty text trying again %r",
                            depth)
                # BUG FIX: forward `options` on retry — the original silently
                # dropped it, so retries ran with options=None.
                return self.generate(prompt + [" {}".format(depth)],
                                     options=options,
                                     seed=depth,
                                     depth=depth + 1)
            else:
                # logger.warn is deprecated; logger.warning is the real API.
                logger.warning(
                    "Model generated empty text %r times. Try another action",
                    depth)
        return result