Example 1
def instructions():
    if settings.getboolean("console-bell"):
        bell = "on"
    else:
        bell = "off"
    if settings.getboolean("action-d20"):
        d20 = "on"
    else:
        d20 = "off"
    print(
        '\033[' + colors["instructions"] + 'm' +
        'AID2: Clover Edition Instructions: \n Enter actions starting with a verb ex. "go to the tavern" or "attack the orc."\n To speak enter say "(thing you want to say)" or just "(thing you want to say)"'
    )
    print('The following commands can be entered for any action:')
    print(
        '  "/revert"            Reverts the last action allowing you to pick a different action.'
    )
    print('  "/quit"              Quits the game and saves')
    print(
        '  "/menu"              Starts a new game and saves your current one')
    print('  "/retry"             Retries the last action')
    print('  "/restart"           Restarts the current story')
    print(
        '  "/print"             Prints a transcript of your adventure (without extra newline formatting)'
    )
    print('  "/help"              Prints these instructions again')
    print(
        '  "/set SETTING VALUE" Sets the specified setting to the specified value.:'
    )
    print(
        '      temp            Higher values make the AI more random. Default: 0.4 | Current:',
        settings.getfloat("temp"))
    print(
        '      rep-pen         Controls how repetitive the AI is allowed to be. Default: 1.2 | Current:',
        settings.getfloat("rep-pen"))
    print(
        '      text-wrap-width Maximum width of lines printed by computer. Default: 80 | Current:',
        settings.getint("text-wrap-width"))
    print(
        '      console-bell    Beep after AI generates text? Default: on | Current:',
        bell)
    print(
        '      top-keks        Number of words the AI can randomly choose. Default: 20 | Current:',
        settings.getint("top-keks"))
    print('      generate-num    Default: 60 | Current:',
          settings.getint("generate-num"))
    print('      top-p           Default: 0.9 | Current:',
          settings.getfloat("top-p"))
    print('      log-level       Default: 3 | Current:',
          settings.getint("log-level"))
    print(
        '      action-sugg     How many actions to generate, 0 is off. Default: 4 | Current:',
        settings.getint("action-sugg"))
    print(
        '      action-d20      Make actions difficult. Default: on | Current:',
        d20)
    print(
        '      action-temp     How random the suggested actions are. Default: 1 | Current:',
        settings.getfloat("action-temp"), '\033[39m')
Example 2
def clear_lines(n):
    """Clear the last line in the terminal."""
    if in_colab() or settings.getboolean('colab-mode'):
        # this won't work in colab, etc.
        return
    screen_code = "\033[1A[\033[2K"  # up one line, and clear line
    for _ in range(n):
        print(screen_code, end="\r")
Example 3
    def __init__(self,
                 generate_num=60,
                 temperature=0.4,
                 top_k=40,
                 top_p=0.9,
                 dtype=DTYPE,
                 model_path: Union[str,
                                   Path] = Path('models',
                                                'pytorch-gpt2-xl-aid2-v5'),
                 repetition_penalty=1,
                 repetition_penalty_range=512,
                 repetition_penalty_slope=3.33):
        self.generate_num = generate_num
        self.temp = temperature
        self.top_k = top_k
        self.top_p = top_p
        self.samples = 1
        self.dtype = dtype
        self.repetition_penalty = repetition_penalty
        self.repetition_penalty_range = repetition_penalty_range
        self.repetition_penalty_slope = repetition_penalty_slope
        self.batch_size = 1
        self.max_history_tokens = 1024 - generate_num
        self.stop_token = "<|endoftext|>"

        if isinstance(model_path, str):
            self.checkpoint_path = model_path
            logger.warning(
                f"Using DEBUG MODE! This will load one of the generic (non-finetuned) GPT2 models. "
                f"Selected: {model_path}")
        elif isinstance(model_path, Path):
            self.checkpoint_path = model_path
            if not self.checkpoint_path.exists():
                raise FileNotFoundError(
                    "Could not find {} Make sure to download a pytorch model and put it in the models directory!"
                    .format(str(self.checkpoint_path)))
        else:
            raise ValueError(
                f"model_path must be either str or Path, got {type(model_path)}"
            )

        self.device = torch.device("cuda" if self.dtype ==
                                   torch.float16 else "cpu")
        logger.info("Using device={}, checkpoint={}, dtype={}".format(
            self.device, str(self.checkpoint_path), self.dtype))

        # Load tokenizer and model
        model_class, tokenizer_class = MODEL_CLASSES[
            "gpt2-experimental"] if settings.getboolean(
                "gpt2_experimental") else MODEL_CLASSES["gpt2"]
        if "gpt-neo" in str(model_path):
            self.max_history_tokens = 2048 - generate_num
            model_class = GPTNeoForCausalLM
        self.tokenizer = tokenizer_class.from_pretrained(
            str(self.checkpoint_path))
        self.model = model_class.from_pretrained(str(self.checkpoint_path))
        self.model.to(self.dtype).to(self.device)
        self.model.eval()
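
A hypothetical instantiation sketch to show how the constructor above is meant to be called. The class name GPT2Generator is an assumption (the snippet starts at __init__, so the real name is not shown); the keyword arguments are taken from the signature above, and the model directory must already exist.

from pathlib import Path

generator = GPT2Generator(  # class name assumed; arguments match the signature above
    generate_num=60,
    temperature=0.4,
    top_k=40,
    top_p=0.9,
    repetition_penalty=1.2,
    model_path=Path("models", "pytorch-gpt2-xl-aid2-v5"),
)
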
Example 4
def sample_sequence(model,
                    length,
                    context,
                    temperature=1,
                    top_k=0,
                    top_p=0.9,
                    repetition_penalty=1.0,
                    device="cpu",
                    stop_tokens=None,
                    tokenizer=None):
    """Actually generate the tokens"""
    logger.debug('temp: {}    top_k: {}    top_p: {}    rep-pen: {}'.format(
        temperature, top_k, top_p, repetition_penalty))

    max_length = context.shape[1] + length  # check to see if greater than 2048?

    if settings.getboolean('force-cpu'):
        context = context.long().cpu()
    else:
        context = context.long().cuda()

    out = model.generate(
        context,
        do_sample=True,
        min_length=max_length,
        max_length=max_length,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        repetition_penalty_range=300,
        repetition_penalty_slope=3.33,
        use_cache=True,
        pad_token_id=tokenizer.eos_token_id,
    ).long()

    generated = tokenizer.decode(out[0])

    return generated
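
A hedged usage sketch, assuming a model and tokenizer loaded as in Example 3; the prompt text is only illustrative. The context must be a 2-D tensor, since the wrapper reads context.shape[1], and the function returns already-decoded text.

# Hypothetical call: sample_sequence moves the tensor to CPU or GPU itself
# (based on the force-cpu setting) and decodes the result before returning.
context = tokenizer.encode("You enter the tavern.", return_tensors="pt")  # shape (1, n)
text = sample_sequence(model, 60, context,
                       temperature=0.4, top_k=40, top_p=0.9,
                       repetition_penalty=1.2, tokenizer=tokenizer)
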
Example 5
def in_colab():
    """Some terminal codes don't work in a colab notebook."""
    # from https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py
    if settings.getboolean("colab-mode"):
        return True
    try:
        from IPython import get_ipython
        if (not get_ipython()) or (
                'IPKernelApp' not in get_ipython().config):  # pragma: no cover
            raise ImportError("console")
        if 'VSCODE_PID' in os.environ:  # pragma: no cover
            raise ImportError("vscode")
    except ImportError:
        if get_terminal_size()[0] == 0 or 'google.colab' in sys.modules:
            settings["colab-mode"] = "on"
            settings["prompt-toolkit"] = "off"
            return True
        return False
    else:
        settings["colab-mode"] = "on"
        settings["prompt-toolkit"] = "off"
        return True
Example 6
def play(generator):
    print("\n")

    with open(Path("interface", "mainTitle.txt"), "r",
              encoding="utf-8") as file:
        colPrint(file.read(), colors["title"], wrap=False)

    with open(Path("interface", "subTitle.txt"), "r",
              encoding="utf-8") as file:
        cols = termWidth
        for line in file:
            line = re.sub(r'\n', '', line)
            line = line[:cols]
            # fill in the graphic by substituting reverse-video mode into the areas between |'s
            colPrint(
                re.sub(r'\|[ _]*(\||$)',
                       lambda x: '\x1B[7m' + x.group(0) + '\x1B[27m', line),
                colors['subtitle'], False)

    print()
    colPrint(
        "Go to https://github.com/cloveranon/Clover-Edition/ or email [email protected] for bug reports, help, and feature requests.",
        colors['subsubtitle'])

    while True:
        # May be needed to avoid out of mem
        gc.collect()
        torch.cuda.empty_cache()

        print("\n\n")

        colPrint(
            "0: Pick Prompt From File (Default if you type nothing)\n1: Write Custom Prompt",
            colors['menu'])

        if getNumberInput(1) == 1:
            with open(Path("interface", "prompt-instructions.txt"),
                      "r",
                      encoding="utf-8") as file:
                colPrint(file.read(), colors["instructions"], False)
            prompt = colInput("Prompt>", colors["main-prompt"],
                              colors["user-text"])
            context = colInput("Context>", colors["main-prompt"],
                               colors["user-text"])
            filename = colInput(
                "Name to save prompt as? (Leave blank for no save): ",
                colors["query"],
                colors["user-text"],
            )
            filename = re.sub(
                "-$", "",
                re.sub("^-", "", re.sub("[^a-zA-Z0-9_-]+", "-", filename)))
            if filename != "":
                with open(Path("prompts", filename + ".txt"),
                          "w",
                          encoding="utf-8") as f:
                    f.write(context + "\n" + prompt)
        else:
            prompt, context = selectFile()
        assert (prompt + context)

        instructions()

        print()
        colPrint("Generating story...", colors["loading-message"])

        story = newStory(generator, prompt, context)

        while True:
            # Generate suggested actions
            act_alts = settings.getint("action-sugg")
            if act_alts > 0:

                # TODO change this to two messages for different colors
                suggested_actions = []
                colPrint("\nSuggested actions:", colors["selection-value"])
                action_suggestion_lines = 2
                for i in range(act_alts):
                    suggested_action = story.getSuggestion()
                    if len(suggested_action.strip()) > 0:
                        j = len(suggested_actions)
                        suggested_actions.append(suggested_action)
                        suggestion = "{}> {}".format(j, suggested_action)
                        action_suggestion_lines += colPrint(
                            suggestion, colors["selection-value"])
                print()

            bell()
            action = colInput("> You ", colors["main-prompt"],
                              colors["user-text"])

            # Clear suggestions and user input
            if act_alts > 0:
                action_suggestion_lines += 2
                if not IN_COLAB:
                    clear_lines(action_suggestion_lines)

                    # Show user input again
                    # colPrint("\n> " + action.rstrip(), colors["user-text"], end="")

            setRegex = re.search("^/set ([^ ]+) ([^ ]+)$", action)
            if setRegex:
                if setRegex.group(1) in settings:
                    currentSettingValue = settings[setRegex.group(1)]
                    colPrint(
                        "Current Value of {}: {}     Changing to: {}".format(
                            setRegex.group(1), currentSettingValue,
                            setRegex.group(2)))
                    settings[setRegex.group(1)] = setRegex.group(2)
                    colPrint("Save config file?", colors["query"])
                    colPrint("Saving an invalid option will corrupt file!",
                             colors["error"])
                    if (colInput(
                            "y/n? >",
                            colors["selection-prompt"],
                            colors["selection-value"],
                    ) == "y"):
                        with open("config.ini", "w", encoding="utf-8") as file:
                            config.write(file)
                else:
                    colPrint("Invalid Setting", colors["error"])
                    instructions()
            elif action == "/menu":
                break
            elif action == "/restart":
                print()
                colPrint("Restarting story...", colors["loading-message"])

                story = newStory(generator, story.prompt, context)
                continue
            elif action == "/quit":
                exit()
            elif action == "/help":
                instructions()
            elif action == "/print":
                print("\nPRINTING\n")
                #TODO colorize printed story
                colPrint(str(story), colors["print-story"])
            elif action == '/retry':

                if len(story.story) == 1:
                    print()
                    colPrint("Restarting story...", colors["loading-message"])
                    story = newStory(generator, story.prompt, context)
                    continue
                else:
                    newaction = story.story[-1][0]

                colPrint(newaction, colors['user-text'], end='')
                story.story = story.story[:-1]
                result = "\n" + story.act(newaction)[0]

                if len(story.story) >= 2:
                    similarity = get_similarity(result, story.story[-2][1][0])
                    if similarity > 0.9:
                        story.story = story.story[:-1]
                        colPrint(
                            "Woops that action caused the model to start looping. Try a different action to prevent that.",
                            colors["error"],
                        )
                        continue
                colPrint(result, colors["ai-text"])

                continue

            elif action == '/revert':

                if len(story.story) == 1:
                    colPrint("You can't go back any farther. ",
                             colors["error"])
                    continue

                story.story = story.story[:-1]
                colPrint("Last action reverted. ", colors["message"])
                if len(story.story) < 2:
                    colPrint(story.prompt, colors["ai-text"])
                colPrint(story.story[-1][1][0], colors["ai-text"])

                continue

            elif action == "/alter":
                story.story[-1][1][0] = alterText(story.story[-1][1][0])
                if len(story.story) < 2:
                    colPrint(story.prompt, colors["ai-text"])
                else:
                    colPrint("\n" + story.story[-1][0] + "\n",
                             colors["transformed-user-text"])
                colPrint("\n" + story.story[-1][1][0] + "\n\n",
                         colors["ai-text"])

            elif action == "/prompt":
                story.prompt = alterText(story.prompt)
                if len(story.story) < 2:
                    colPrint(story.prompt, colors["ai-text"])
                else:
                    colPrint("\n" + story.story[-1][0] + "\n",
                             colors["transformed-user-text"])
                colPrint("\n" + story.story[-1][1][0] + "\n\n",
                         colors["ai-text"])

            else:
                if act_alts > 0:
                    # Options to select a suggestion action
                    if action in [
                            str(i) for i in range(len(suggested_actions))
                    ]:
                        action = suggested_actions[int(action)]

                original_action = action
                action = action.strip()
                #TODO debug stuff to delete
                if action != original_action:
                    logger.debug("STRIPPED WHITE SPACE OFF ACTION %r vs %r",
                                 original_action, action)

                # Crop actions to a max length
                #action = action[:4096]

                if action != "":

                    # Roll a 20-sided die to make things interesting
                    d = random.randint(1, 20)
                    logger.debug("roll d20=%s", d)

                    # If it says 'You say "' then it's still dialogue. Normalise it by removing `You say `; we will add it back shortly
                    action = re.sub("^ ?[Yy]ou say [\"']", '"', action)
                    if any(action.lstrip().startswith(t) for t in ['"', "'"]):
                        if settings.getboolean("action-d20"):
                            action = d20ify_speech(action, d)
                        else:
                            action = "You say " + action
                        logger.info(
                            "%r. %r, %r", action,
                            any(action.lstrip().startswith(t)
                                for t in ['"', "'"]),
                            settings.getboolean("action-d20"))
                    else:
                        action = first_to_second_person(action)
                        if not action.lower().startswith(
                                "you ") and not action.lower().startswith(
                                    "i "):
                            action = action[0].lower() + action[1:]
                            # roll a d20
                            if settings.getboolean("action-d20"):
                                action = d20ify_action(action, d)
                            else:
                                action = "You " + action

                        if action[-1] not in [".", "?", "!"]:
                            action = action + "."

                action = "\n> " + action + "\n"

                colPrint(
                    "\n>" + action.lstrip().lstrip("> \n"),
                    colors["transformed-user-text"],
                )
                #TODO check if leading white space makes sense
                result = "\n" + story.act(action)[0]

                #TODO: Replace all this nonsense
                if len(story.story) >= 2:
                    similarity = get_similarity(result, story.story[-2][1][0])
                    if similarity > 0.9:
                        story.story = story.story[:-1]
                        colPrint(
                            "Woops that action caused the model to start looping. Try a different action to prevent that.",
                            colors["error"],
                        )
                        continue

                if player_won(result):
                    colPrint(result + "\n CONGRATS YOU WIN", colors["message"])
                    break
                elif player_died(result):
                    colPrint(result, colors["ai-text"])
                    colPrint("YOU DIED. GAME OVER", colors["error"])
                    colPrint(
                        "\nOptions:\n0)Start a new game\n1)\"I'm not dead yet!\" (If you didn't actually die)",
                        colors["menu"],
                    )
                    choice = getNumberInput(1)
                    if choice == 0:
                        break
                    else:
                        colPrint("Sorry about that...where were we?",
                                 colors["query"])
                colPrint(result, colors["ai-text"])
Example 7
def bell():
    if settings.getboolean("console-bell"):
        print("\x07", end="")
Example 8
        if (not get_ipython()) or (
                'IPKernelApp' not in get_ipython().config):  # pragma: no cover
            raise ImportError("console")
        if 'VSCODE_PID' in os.environ:  # pragma: no cover
            raise ImportError("vscode")
    except ImportError:
        if get_terminal_size()[0] == 0 or 'google.colab' in sys.modules:
            return True
        return False
    else:
        return True


IN_COLAB = _in_colab()
logger.info("Colab detected: {}".format(IN_COLAB))
IN_COLAB = IN_COLAB or settings.getboolean('colab-mode')
if IN_COLAB:
    logger.warning(
        "Colab mode enabled, disabling line clearing and readline to avoid colab bugs."
    )
else:
    try:
        import readline
        logger.info(
            'readline has been imported. This enables a number of editing features but may cause bugs for colab users.'
        )
    except ModuleNotFoundError:
        pass

termWidth = get_terminal_size()[0]
if termWidth < 5:
Example 9
def sample_sequence(model,
                    length,
                    context,
                    temperature=1,
                    top_k=0,
                    top_p=0.9,
                    repetition_penalty=1.0,
                    repetition_penalty_range=512,
                    repetition_penalty_slope=3.33,
                    device="cpu",
                    stop_tokens=None,
                    tokenizer=None):
    """Actually generate the tokens"""
    logger.debug(
        'temp: {}    top_k: {}    top_p: {}    rep-pen: {}    rep-pen-range: {}    rep-pen-slope: {}'
        .format(temperature, top_k, top_p, repetition_penalty,
                repetition_penalty_range, repetition_penalty_slope))
    context_tokens = context
    context = torch.tensor(context, dtype=torch.long, device=device)
    # context = context.repeat(num_samples, 1)
    generated = context
    USE_PAST = True
    next_token = context
    pasts = None
    clines = 0

    penalty = None
    if repetition_penalty_range is not None and repetition_penalty_slope is not None and repetition_penalty_range > 0:
        penalty = (torch.arange(repetition_penalty_range) /
                   (repetition_penalty_range - 1)) * 2. - 1
        penalty = (repetition_penalty_slope *
                   penalty) / (1 + torch.abs(penalty) *
                               (repetition_penalty_slope - 1))
        penalty = 1 + ((penalty + 1) / 2) * (repetition_penalty - 1)

    with torch.no_grad():
        for j in range(length):
            # why would we ever not use past?
            # are generated and next_token always the same thing?
            if not USE_PAST:
                input_ids_next = generated
                pasts = None
            else:
                input_ids_next = next_token

            # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)
            model_kwargs = {"past": pasts, "use_cache": True}
            model_inputs = model.prepare_inputs_for_generation(
                generated.unsqueeze(0), **model_kwargs)
            model_outputs = model(**model_inputs, return_dict=True)
            logits, pasts = model_outputs.logits, model_outputs.past_key_values
            logits = logits[0, -1, :].float()

            # Originally the order was Temperature, Repetition Penalty, then top-k/p
            if settings.getboolean('top-p-first'):
                logits = top_k_top_p_filtering(logits,
                                               top_k=top_k,
                                               top_p=top_p)

            logits = logits / (temperature if temperature > 0 else 1.0)

            # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858) plus range limit
            if repetition_penalty != 1.0:
                if penalty is not None:
                    penalty_len = min(generated.shape[0],
                                      repetition_penalty_range)
                    penalty_context = generated[-repetition_penalty_range:]
                    score = torch.gather(logits, 0, penalty_context)
                    penalty = penalty.type(score.dtype).to(score.device)
                    penalty_window = penalty[-penalty_len:]
                    score = torch.where(score < 0, score * penalty_window,
                                        score / penalty_window)
                    logits.scatter_(0, penalty_context, score)
                else:
                    score = torch.gather(logits, 0, generated)
                    score = torch.where(score < 0, score * repetition_penalty,
                                        score / repetition_penalty)
                    logits.scatter_(0, generated, score)

            if not settings.getboolean('top-p-first'):
                logits = top_k_top_p_filtering(logits,
                                               top_k=top_k,
                                               top_p=top_p)

            if temperature == 0:  # greedy sampling:
                next_token = torch.argmax(logits, dim=-1).unsqueeze(-1)
            else:
                next_token = torch.multinomial(F.softmax(logits, dim=-1),
                                               num_samples=1)
            generated = torch.cat((generated, next_token), dim=-1)
            # Decode into plain text
            o = generated[len(context_tokens):].tolist()
            generated.text = tokenizer.decode(
                o,
                clean_up_tokenization_spaces=False,
                skip_special_tokens=True)
            if use_ptoolkit():
                clear_lines(clines)
                generated.text = format_result(generated.text)
                clines = output(generated.text, "ai-text")
            if ((stop_tokens is not None) and (j > 4)
                    and (next_token[0] in stop_tokens)):
                # Why the minimum number of tokens (j > X)? Because sometimes the model starts with whitespace, which we will strip away anyway. Requiring a minimum number of tokens before stopping usually means we don't stop just because of "\n " or similar.
                logger.debug(
                    "Stopping generation as we found stop tokens. One of `%s`, in '%s'. token generated `%s`",
                    stop_tokens,
                    next_token,
                    j,
                )
                break
    clear_lines(clines)
    return generated
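
To illustrate the ranged repetition penalty built above, here is a standalone sketch, not from the repository, that recomputes the same ramp with the default values used in these examples: the oldest token inside the window ends up with a factor of about 1.0 (effectively no penalty), the newest with the full repetition_penalty, and the slope controls how sharp the S-shaped transition between them is.

import torch

rep_pen, rng, slope = 1.2, 512, 3.33                 # defaults from the examples above
x = (torch.arange(rng) / (rng - 1)) * 2.0 - 1.0      # positions mapped to -1 (oldest) .. +1 (newest)
x = (slope * x) / (1 + torch.abs(x) * (slope - 1))   # S-curve, still within [-1, 1]
ramp = 1 + ((x + 1) / 2) * (rep_pen - 1)             # mapped onto [1.0, rep_pen]
print(ramp[0].item(), ramp[rng // 2].item(), ramp[-1].item())  # ~1.0, ~1.1, 1.2
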
Example 10
import os
from pathlib import Path
from typing import Union

import torch
import torch.nn.functional as F
import re
from gpt2 import GPT2LMHeadModelExperimental
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPTNeoForCausalLM
from getconfig import settings, logger
from utils import cut_trailing_sentence, output, clear_lines, format_result, use_ptoolkit

if not settings.getboolean('force-cpu') and not torch.cuda.is_available():
    logger.warning('CUDA is not available; you are limited to CPU only.')

DTYPE = torch.float32 if ((not torch.cuda.is_available()) or
                          settings.getboolean('force-cpu')) else torch.float16
logger.info('Cuda Available: {}    Force CPU: {}    Precision: {}'.format(
    torch.cuda.is_available(), settings.getboolean('force-cpu'),
    '32-bit' if DTYPE == torch.float32 else '16-bit'))

# warnings.filterwarnings("ignore")
MODEL_CLASSES = {
    "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
    "gpt2-experimental": (GPT2LMHeadModelExperimental, GPT2Tokenizer),
}


def getTokens(tokenizer, l):
    return tokenizer.encode(l)
Example 11
def sample_sequence(
        model,
        length,
        context,
        temperature=1,
        top_k=0,
        top_p=0.9,
        repetition_penalty=1.0,
        device="cpu",
        stop_tokens=None,
        tokenizer=None
):
    """Actually generate the tokens"""
    logger.debug(
        'temp: {}    top_k: {}    top_p: {}    rep-pen: {}'.format(temperature, top_k, top_p, repetition_penalty))
    context_tokens = context
    context = torch.tensor(context, dtype=torch.long, device=device)
    # context = context.repeat(num_samples, 1)
    generated = context
    USE_PAST = True
    next_token = context
    pasts = None
    clines = 0
    with torch.no_grad():
        for j in range(length):
            # why would we ever not use past?
            # are generated and next_token always the same thing?
            if not USE_PAST:
                input_ids_next = generated
                pasts = None
            else:
                input_ids_next = next_token

            # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)
            logits, pasts = model(input_ids=input_ids_next, past=pasts)
            logits = logits[-1, :].float()

            # TODO: rewrite this logic
            if settings.getboolean('sparse-gen'):
                probs = entmax_bisect(logits, dim=-1, alpha=settings.getfloat('sparse-level'))
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                # Originally the order was Temperature, Repetition Penalty, then top-k/p
                if settings.getboolean('top-p-first'):
                    logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)

                logits = logits / (temperature if temperature > 0 else 1.0)

                # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)
                for k in set(generated.tolist()):
                    logits[k] /= repetition_penalty

                if not settings.getboolean('top-p-first'):
                    logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)

                if temperature == 0:  # greedy sampling:
                    next_token = torch.argmax(logits, dim=-1).unsqueeze(-1)
                else:
                    next_token = torch.multinomial(
                        F.softmax(logits, dim=-1), num_samples=1
                    )
            generated = torch.cat((generated, next_token), dim=-1)
            # Decode into plain text
            o = generated[len(context_tokens):].tolist()
            generated.text = tokenizer.decode(
                o, clean_up_tokenization_spaces=False, skip_special_tokens=True
            )
            if use_ptoolkit():
                clear_lines(clines)
                generated.text = format_result(generated.text)
                clines = output(generated.text, "ai-text")
            if (
                    (stop_tokens is not None)
                    and (j > 4)
                    and (next_token[0] in stop_tokens)
            ):
                # Why the minimum number of tokens (j > X)? Because sometimes the model starts with whitespace, which we will strip away anyway. Requiring a minimum number of tokens before stopping usually means we don't stop just because of "\n " or similar.
                logger.debug(
                    "Stopping generation as we found stop tokens. One of `%s`, in '%s'. token generated `%s`",
                    stop_tokens,
                    next_token,
                    j,
                )
                break
    clear_lines(clines)
    return generated
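
The sparse-gen branch above swaps softmax sampling for entmax. A small illustrative sketch, assuming the third-party entmax package that provides entmax_bisect: softmax keeps every token's probability above zero, while entmax with alpha greater than 1 pushes low-scoring tokens to exactly zero, so they can never be sampled.

import torch
import torch.nn.functional as F
from entmax import entmax_bisect  # third-party package assumed by the sparse-gen branch

logits = torch.tensor([3.0, 2.0, 0.5, -1.0])
dense = F.softmax(logits, dim=-1)                  # every entry stays > 0
sparse = entmax_bisect(logits, alpha=1.5, dim=-1)  # trailing entries become exactly 0
next_token = torch.multinomial(sparse, num_samples=1)  # zero-probability tokens are never drawn
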
Example 12
import os
from pathlib import Path
from typing import Union

import torch
import torch.nn.functional as F
import re
from gpt2 import GPT2LMHeadModelExperimental
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from getconfig import settings, logger
from utils import cut_trailing_sentence, output, clear_lines, format_result, use_ptoolkit

if not settings.getboolean('force-cpu') and not torch.cuda.is_available():
    logger.warning('CUDA is not available; you are limited to CPU only.')

DTYPE = torch.float32 if ((not torch.cuda.is_available()) or settings.getboolean('force-cpu')) else torch.float16
logger.info('Cuda Available: {}    Force CPU: {}    Precision: {}'.format(torch.cuda.is_available(),
                                                                          settings.getboolean('force-cpu'),
                                                                          '32-bit' if DTYPE == torch.float32 else '16-bit'))

# warnings.filterwarnings("ignore")
MODEL_CLASSES = {
    "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
    "gpt2-experimental": (GPT2LMHeadModelExperimental, GPT2Tokenizer),
}


def getTokens(tokenizer, l):
    return tokenizer.encode(l)

Example 13
def use_ptoolkit():
    return not settings.getboolean("colab-mode") and settings.getboolean(
        'prompt-toolkit')
Example 14
    """Clear the last line in the terminal."""
    if in_colab() or settings.getboolean('colab-mode'):
        # this won't work in colab, etc.
        return
    screen_code = "\033[1A[\033[2K"  # up one line, and clear line
    for _ in range(n):
        print(screen_code, end="\r")


if in_colab():
    logger.warning(
        "Colab mode enabled, disabling line clearing and readline to avoid colab bugs."
    )
else:
    try:
        if settings.getboolean('prompt-toolkit'):
            from inline_editor import edit_multiline
            from prompt_toolkit import prompt as ptprompt
            from prompt_toolkit import print_formatted_text
            from prompt_toolkit.styles import Style
            from prompt_toolkit.formatted_text import to_formatted_text, HTML
        else:
            raise ModuleNotFoundError

        logger.info(
            'Python Prompt Toolkit has been imported. This enables a number of editing features but may cause bugs for colab users.'
        )
    except (ImportError, ModuleNotFoundError):
        try:
            settings['prompt-toolkit'] = "off"
            import readline
Example 15
def sample_sequence(model,
                    length,
                    context,
                    num_samples=1,
                    temperature=1,
                    top_k=0,
                    top_p=0.9,
                    repetition_penalty=1.0,
                    is_xlnet=False,
                    is_xlm_mlm=False,
                    xlm_mask_token=None,
                    xlm_lang=None,
                    device="cpu",
                    stop_tokens=None,
                    tokenizer=None):
    logger.debug('temp: {}    top_k: {}    top_p: {}    rep-pen: {}'.format(
        temperature, top_k, top_p, repetition_penalty))
    context = torch.tensor(context, dtype=torch.long, device=device)
    context = context.unsqueeze(0).repeat(num_samples, 1)
    generated = context
    USE_PAST = True
    next_token = context
    outputs = None
    with torch.no_grad():
        for j in range(length):
            # why would we ever not use past?
            # are generated and next_token always the same thing?
            if USE_PAST:
                past = outputs[1] if outputs is not None else None
                inputs = {"input_ids": next_token, "past": past}
            else:
                inputs = {"input_ids": generated}

            outputs = model(
                **inputs
            )  # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states)

            logits = outputs[0][:, -1, :].float()

            # Originally the order was Temperature, Repetition Penalty, then top-k/p
            if settings.getboolean('top-p-first'):
                logits = top_k_top_p_filtering(logits,
                                               top_k=top_k,
                                               top_p=top_p)

            logits = logits / (temperature if temperature > 0 else 1.0)

            # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858)
            for i in range(num_samples):
                for k in set(generated[i].tolist()):
                    logits[i, k] /= repetition_penalty

            if not settings.getboolean('top-p-first'):
                logits = top_k_top_p_filtering(logits,
                                               top_k=top_k,
                                               top_p=top_p)

            if temperature == 0:  # greedy sampling:
                next_token = torch.argmax(logits, dim=-1).unsqueeze(-1)
            else:
                next_token = torch.multinomial(F.softmax(logits, dim=-1),
                                               num_samples=1)
            generated = torch.cat((generated, next_token), dim=1)
            if ((stop_tokens is not None) and (j > 4)
                    and (next_token[0][0] in stop_tokens)):
                # Why the minimum number of tokens (j > X)? Because sometimes the model starts with whitespace, which we will strip away anyway. Requiring a minimum number of tokens before stopping usually means we don't stop just because of "\n " or similar.
                logger.debug(
                    "Stopping generation as we found stop tokens. One of `%s`, in '%s'. token generated `%s`",
                    stop_tokens,
                    next_token,
                    j,
                )
                break
    return generated
Example 16
import os
from pathlib import Path
import itertools
import torch
import torch.nn.functional as F

from transformers import GPT2LMHeadModel, GPT2Tokenizer

from getconfig import settings, logger
from story.utils import cut_trailing_sentence

DTYPE = torch.float32 if ((not torch.cuda.is_available()) or
                          settings.getboolean('force-cpu')) else torch.float16
logger.info('Cuda Available: {}    Force CPU: {}    DTYPE: {}'.format(
    torch.cuda.is_available(), settings.getboolean('force-cpu'), DTYPE))

# warnings.filterwarnings("ignore")
MODEL_CLASSES = {
    "gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
}


def top_k_top_p_filtering(logits,
                          top_k=0,
                          top_p=0.0,
                          filter_value=-float("Inf")):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (batch size x vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Example 17
def use_ptoolkit():
    return not in_colab() and settings.getboolean('prompt-toolkit')