def test_from_pipeline_conversation(self):
    model_id = "facebook/blenderbot_small-90M"

    # from model id
    conversation_agent_from_model_id = pipeline(
        "conversational", model=model_id, tokenizer=model_id
    )

    # from model object
    model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id)
    tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id)
    conversation_agent_from_model = pipeline(
        "conversational", model=model, tokenizer=tokenizer
    )

    conversation = Conversation("My name is Sarah and I live in London")
    conversation_copy = Conversation("My name is Sarah and I live in London")

    result_model_id = conversation_agent_from_model_id([conversation])
    result_model = conversation_agent_from_model([conversation_copy])

    # check for equality: both routes should yield the same response
    self.assertEqual(
        result_model_id.generated_responses[0],
        "hi sarah, i live in london as well. do you have any plans for the weekend?",
    )
    self.assertEqual(
        result_model_id.generated_responses[0],
        result_model.generated_responses[0],
    )
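A follow-up turn can reuse the same Conversation object; a minimal sketch using the names from the test above (assumes a transformers version that still ships the "conversational" pipeline, and an illustrative prompt):

# add another user turn and run the agent again
conversation.add_user_input("What are you up to this weekend?")
conversation = conversation_agent_from_model_id(conversation)
print(conversation.generated_responses[-1])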
def test_generate_fp16(self):
    config, input_dict = self.model_tester.prepare_config_and_inputs()
    input_ids = input_dict["input_ids"]
    attention_mask = input_ids.ne(1).to(torch_device)
    model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device)
    if torch_device == "cuda":
        model.half()  # fp16 only on GPU
    model.generate(input_ids, attention_mask=attention_mask)
    model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
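The same fp16 pattern as a standalone sketch against the pretrained checkpoint (assumes a CUDA device is available; the prompt is illustrative):

mname = "facebook/blenderbot_small-90M"
tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname).half().to("cuda")
inputs = tokenizer("hello there", return_tensors="pt").to("cuda")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])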
Example 3
def Loader():
    # Reset the persisted step counter before loading.
    with open("step.txt", "w") as f:
        f.write("0")
    # "facebook/blenderbot-90M" is a deprecated alias; the canonical
    # checkpoint name for the small model is used here instead.
    mname = "facebook/blenderbot_small-90M"
    model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
    tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
    return tokenizer, model
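A minimal usage sketch for the returned pair (the prompt text is illustrative):

tokenizer, model = Loader()
inputs = tokenizer("Hello, how are you today?", return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])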
Example 4
    def __init__(self, size, env, device, max_context_length):
        """
        The Blender chatbot model was proposed in "Recipes for Building an Open-Domain
        Chatbot" by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson,
        Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau and
        Jason Weston on 30 Apr 2020.

        Args:
            size (str): model size (one of ['small', 'medium', 'large', 'xlarge'])
            env (BaseEnv): dialogue environment
            device (str): device (one of ['cpu', 'cuda', 'cuda:N'])
            max_context_length (int): maximum length of the history context,
                in input tokens
        """

        assert size in ['small', 'medium', 'large', 'xlarge'], \
            "model size must be one of ['small', 'medium', 'large', 'xlarge']"
        # normalize the device string up front so e.g. 'CUDA:0' works with .to()
        device = device.lower()

        if size == "small":
            super().__init__("facebook/blenderbot_small-90M", env)
            self.model = BlenderbotSmallForConditionalGeneration.from_pretrained(
                self.name).to(device)
            self.tokenizer = BlenderbotSmallTokenizer.from_pretrained(
                self.name)
        else:
            if size == "medium":
                super().__init__("facebook/blenderbot-400M-distill", env)
            elif size == "large":
                super().__init__("facebook/blenderbot-1B-distill", env)
            elif size == "xlarge":
                super().__init__("facebook/blenderbot-3B", env)

            self.model = BlenderbotForConditionalGeneration.from_pretrained(
                self.name).to(device)
            self.tokenizer = BlenderbotTokenizer.from_pretrained(self.name)

        self.size = size
        self.device = device
        self.max_context_length = max_context_length
        self.eos = "</s> <s>"

        print('Done!')
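Once constructed, the wrapper's model and tokenizer attributes can be driven directly; a minimal sketch, assuming the enclosing class is called BlenderBot and that env is a BaseEnv instance (both names are unverified assumptions taken from the snippet's context):

# hypothetical instantiation; only the constructor signature is given above
bot = BlenderBot(size="small", env=env, device="cpu", max_context_length=128)
inputs = bot.tokenizer("Hi! How has your day been?", return_tensors="pt").to(bot.device)
reply_ids = bot.model.generate(**inputs)
print(bot.tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])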
Example 5
def model(self):
    model = BlenderbotSmallForConditionalGeneration.from_pretrained(self.ckpt).to(torch_device)
    if torch_device == "cuda":
        model = model.half()
    return model
Example 6
if download_type == 'dialogpt':
    #------dialogpt small------#
    model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-small")
    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-small")
    #------dialogpt medium------#
    model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")
    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")

    print("dialogpt is done!")

elif download_type == 'gptneo':
    #------gptneo small------#
    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
    #------gptneo large------#
    #model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
    #tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")

    print("gptneo is done!")

elif download_type == 'blender':
    #------blender small------#
    model = BlenderbotSmallForConditionalGeneration.from_pretrained(
        "facebook/blenderbot_small-90M")
    tokenizer = BlenderbotSmallTokenizer.from_pretrained(
        "facebook/blenderbot_small-90M")
    #------blender medium------#
    model = BlenderbotForConditionalGeneration.from_pretrained(
        "facebook/blenderbot-400M-distill")
    tokenizer = BlenderbotTokenizer.from_pretrained(
        "facebook/blenderbot-400M-distill")

    print("blender is done!")