def _get_utterances(self, prompt, slots, filter_na_slots, max_length):
    """Query the GPT-3 completion endpoint and post-process its utterances.

    The completion text is expected to be a newline-separated bulleted
    list ("- utterance ..."); each line is stripped of its bullet prefix.

    :param prompt: text prompt sent to the completion endpoint.
    :param slots: iterable of allowed slot names, used when filtering.
    :param filter_na_slots: if truthy, drop any utterance that mentions a
        parenthesized slot not present in ``slots``.
    :param max_length: if truthy, drop utterances longer than this many
        characters.
    :return: list of cleaned utterance strings.
    """
    response = openai.Completion().create(prompt=prompt,
                                          **self.gpt3_settings)

    utterances = [
        t.lstrip('- ')
        for t in response['choices'][0]['text'].strip('\n').split('\n')
    ]

    if filter_na_slots:
        # Bug fix: use a raw string and a non-greedy group so an utterance
        # containing several "(slot)" annotations yields each slot name
        # separately; the previous greedy '(.*)' merged "(a) ... (b)" into
        # the single bogus match "a) ... (b".
        utterances_slots = [
            re.findall(r'\((.*?)\)', utt) for utt in utterances
        ]
        utterances = [
            utt for utt, utt_slots in zip(utterances, utterances_slots)
            if all(utt_slot in slots for utt_slot in utt_slots)
        ]

    if max_length:
        utterances = [utt for utt in utterances if len(utt) <= max_length]

    return utterances
Example #2
0
from dotenv import load_dotenv
from random import choice
from flask import Flask, request
import os
import openai

# Read variables from a local .env file into the process environment, then
# pull the OpenAI credential from it.  load_dotenv() must run before getenv.
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
# Reusable completion client (legacy openai<1.0 API style).
completion = openai.Completion()

# Prompt scaffolding: turn markers separating the bot's and the human's
# lines, plus the persona/system text that primes every conversation.
start_sequence = "\nAngela: "
restart_sequence = "\nHuman: "
session_prompt = "Angela is a chatbot that is based on principles of nonviolent communication. Angela repeatedly asks if, with whom, what, where, when, how, or why a conflict is happening and then synthesizes observations,feelings, needs and requests for the human."


def ask(question, chat_log=None):
    """Send *question* to GPT-3 in the Angela-chatbot format and return the reply.

    :param question: the human's new message.
    :param chat_log: accumulated conversation transcript, or None for a
        fresh conversation.
    :return: the model's reply text as a str.
    """
    # Bug fix: a None chat_log previously leaked the literal string "None"
    # into the prompt via the f-string; treat it as an empty history instead.
    history = chat_log if chat_log is not None else ''
    # Keep only the most recent 1847 characters so the prompt plus the
    # 200-token reply stays within the model's context budget.
    prompt_text = f'{history}{restart_sequence}{question}{start_sequence}'[
        -1847:]
    response = openai.Completion.create(
        engine="text-davinci-002",
        prompt=prompt_text,
        temperature=1.0,
        max_tokens=200,
        top_p=1,
        frequency_penalty=1.0,
        presence_penalty=1.0,
        # Stop as soon as the model starts writing either speaker's next turn.
        stop=["Human:", "Angela:"],
    )
    story = response['choices'][0]['text']
    return str(story)

Example #3
0
import os
from dotenv import load_dotenv
import openai

# Load a local .env file before reading the API key from the environment.
load_dotenv()

openai.api_key = os.environ.get("OPENAI_KEY")
# Reusable completion client (legacy openai<1.0 API style).
client = openai.Completion()

# Seed transcript used when a conversation has no history yet.
INITIAL_CONVERSATION = '''Human: Hello, How are you?
AI: I am fine, thank you, how are you?
'''


def ask_ai(question, chat_log=None):
    """Append *question* to the chat transcript, query GPT-3, return the reply.

    :param question: the human's new message.
    :param chat_log: conversation so far; defaults to INITIAL_CONVERSATION.
    :return: the model's reply text as a str.
    """
    if chat_log is None:
        chat_log = INITIAL_CONVERSATION
    prompt = f"{chat_log}Human: {question}\nAI:"
    response = client.create(
        prompt=prompt,  # The text
        engine="davinci",  # Most capable engine out of four
        stop=['\nHuman'],  # When to stop the generating text
        temperature=0.8,  # Creative risk of the model (0-1)
        top_p=1,  # Alternative control of originality/creativity
        # Bug fix: these two were misspelled "frequency_panelty" /
        # "presence_panelty", so the API never received them.
        frequency_penalty=0,  # Higher value discourages repetition
        presence_penalty=0.6,  # Higher value encourages new topics
        best_of=1,
    )
    # The original source was truncated here; return the generated text,
    # matching the response-handling pattern of the sibling examples.
    return response['choices'][0]['text']
Example #4
0
def generate_one_sentence(sentence, control, length=50, disc_weight=30, temperature=0.8, gpt3_id=None):
    """
    Generate one sentence continuation of *sentence*, optionally steered
    towards a topic by a GeDi discriminator.

    NOTE(review): relies on module-level globals not visible in this block
    (tokenizer, model, device, gedi_model, filter_p, target_p, class_bias,
    code_undesired, code_desired, cut_into_sentences) -- confirm they are
    defined elsewhere in this module.

    :param sentence: (string) context (prompt) used.
    :param control: (str) topic string, or (dict) {topic: weight, ...} topics
        that the sentence needs to steer towards.
    :param length: (int) number of new tokens to generate beyond the prompt.
    :param disc_weight: (int) control strength of the discriminator.
    :param temperature: (float) sampling temperature.
    :param gpt3_id: (str or None) OpenAI API key; when set, generation is
        routed through GPT-3 instead of the purely local model.
    :return: (str) the first generated sentence, or "" if nothing was generated.
    """
    secondary_code = control

    # disc_weight = self.disc_weight
    # if type(extra_args) is dict and 'disc_weight' in extra_args:
    #     disc_weight = extra_args['disc_weight']

    # An empty prompt breaks generation downstream, so substitute a dummy.
    if sentence == "":
        print("Prompt is empty! Using a dummy sentence.")
        sentence = "."

    # Specify prompt below
    prompt = sentence

    # Calculate original input length in *characters* (used later to slice
    # the prompt back off the decoded text).
    length_of_prompt = len(sentence)

    start_len = 0
    text_ids = tokenizer.encode(prompt)
    length_of_prompt_in_tokens = len(text_ids)
    encoded_prompts = torch.LongTensor(text_ids).unsqueeze(0).to(device)

    # Encode the control code(s): a plain string becomes a token-id list; a
    # dict of {topic: weight} becomes {first_token_id_of_topic: weight}.
    if type(control) is str:
        multi_code = tokenizer.encode(secondary_code)
    elif type(control) is dict:
        multi_code = {}
        for item in secondary_code:
            encoded = tokenizer.encode(item)[0]  # only take the first one
            multi_code[encoded] = secondary_code[item]
    else:
        raise NotImplementedError("topic data type of %s not supported... Supported: (str,dict)" % type(control))

    # If 1, generate sentences towards a specific topic.
    attr_class = 1

    # NOTE(review): int(control) raises TypeError for dict controls and
    # ValueError for non-numeric strings, even though both pass the type
    # check above -- so only numeric-string/int controls reach this branch
    # safely. Verify intent against callers.
    if int(control)!=-1:
      if gpt3_id is None:
        # GeDi-guided local generation.
        generated_sequence = model.generate(input_ids=encoded_prompts,
                                                  pad_lens=None,
                                                  max_length=length + length_of_prompt_in_tokens,
                                                  top_k=None,
                                                  top_p=None,
                                                  repetition_penalty=1.2,
                                                  rep_penalty_scale=10,
                                                  eos_token_ids=tokenizer.eos_token_id,
                                                  pad_token_id=0,  # self.tokenizer.eos_token_id,
                                                  do_sample=True,
                                                  temperature = temperature,
                                                  penalize_cond=True,
                                                  gedi_model=gedi_model,
                                                  tokenizer=tokenizer,
                                                  disc_weight=disc_weight,
                                                  filter_p=filter_p,
                                                  target_p=target_p,
                                                  class_bias=class_bias,
                                                  attr_class=attr_class,
                                                  code_0=code_undesired,
                                                  code_1=code_desired,
                                                  multi_code=multi_code,
                                                  )
      else:
        # Same call as above routed through GPT-3; only gpt3_api_key differs.
        # NOTE(review): duplicated kwargs -- candidates for a shared dict.
        generated_sequence = model.generate(input_ids=encoded_prompts,
                                                  pad_lens=None,
                                                  max_length=length + length_of_prompt_in_tokens,
                                                  top_k=None,
                                                  top_p=None,
                                                  repetition_penalty=1.2,
                                                  rep_penalty_scale=10,
                                                  eos_token_ids=tokenizer.eos_token_id,
                                                  pad_token_id=0,  # self.tokenizer.eos_token_id,
                                                  do_sample=True,
                                                  temperature = temperature,
                                                  penalize_cond=True,
                                                  gedi_model=gedi_model,
                                                  tokenizer=tokenizer,
                                                  disc_weight=disc_weight,
                                                  filter_p=filter_p,
                                                  target_p=target_p,
                                                  class_bias=class_bias,
                                                  attr_class=attr_class,
                                                  code_0=code_undesired,
                                                  code_1=code_desired,
                                                  multi_code=multi_code,
                                                  gpt3_api_key=gpt3_id,
                                                  )
      text = tokenizer.decode(generated_sequence.tolist()[0], clean_up_tokenization_spaces=True,
                                  skip_special_tokens=True)

      # Strip the prompt off by character count -- assumes the decoded text
      # begins with the prompt verbatim; TODO confirm after detokenization.
      text = text[length_of_prompt:]
    else:
      if gpt3_id is None:
        # Unguided local generation: gedi_model=None and the GeDi-specific
        # kwargs are commented out.
        generated_sequence = model.generate(input_ids=encoded_prompts,
                                                  pad_lens=None,
                                                  max_length=length + length_of_prompt_in_tokens,
                                                  top_k=None,
                                                  top_p=None,
                                                  repetition_penalty=1.2,
                                                  rep_penalty_scale=10,
                                                  eos_token_ids=tokenizer.eos_token_id,
                                                  pad_token_id=0,  # self.tokenizer.eos_token_id,
                                                  do_sample=True,
                                                  temperature = temperature,
                                                  penalize_cond=True,
                                                  gedi_model=None,
                                                  tokenizer=tokenizer,
                                                  disc_weight=disc_weight,
                                                  # filter_p=filter_p,
                                                  # target_p=target_p,
                                                  class_bias=class_bias,
                                                  attr_class=attr_class,
                                                  # code_0=code_undesired,
                                                  # code_1=code_desired,
                                                  # multi_code=multi_code,
                                                  )
        text = tokenizer.decode(generated_sequence.tolist()[0], clean_up_tokenization_spaces=True,
                                  skip_special_tokens=True)

        text = text[length_of_prompt:]

      else:
        # Plain GPT-3 completion via the OpenAI API (legacy openai<1.0 style).
        import openai
        openai.api_key = gpt3_id
        completion = openai.Completion()
        response = completion.create(prompt=prompt,
                                 engine="curie",
                                 max_tokens=length,
                                 temperature=temperature,)
        text = response["choices"][0]["text"]
    # if type(extra_args) is dict and 'get_gen_token_count' in extra_args:
    #     return len(generated_sequence.tolist()[0])

    # Keep only the first complete sentence of the generated continuation.
    text = cut_into_sentences(text)
    if len(text) == 0:
        print("Warning! No text generated.")
        return ""
    all_gen_text = text[0]
    return all_gen_text
Example #5
0
def predict_openai(text):
    """Classify a tweet's political leaning with GPT-3 few-shot prompting.

    Sends a fixed block of labeled example tweets followed by *text* and
    asks the model to complete the final "Sentiment:" line.

    :param text: the tweet to classify.
    :return: the model's label, lowercased and stripped (e.g. "left",
        "right", "neutral").
    """
    openai.api_key = OPENAI_API_KEY
    # Bug fix: openai.Completion(...) merely constructs a client object and
    # never issues the API request, so response['choices'] raised; the call
    # must go through openai.Completion.create(...).
    response = openai.Completion.create(
        engine='davinci',
        prompt=
        '''Tweet: "I think women should be able to choose what they do with their bodies"
Sentiment: Left
###
Tweet: "The radical left or socialism is such a pain💢"
Sentiment: Right
###
Tweet: "Donald Trump is the best thing to happen to this country👍"
Sentiment: Right
###
Tweet: "This country needs to heal right now"
Sentiment: Left
###
Tweet: "This is the link to the article"
Sentiment: Neutral
###
Tweet: "I voted to acquit former President Trump. Read my full statement below"
Sentiment: Right
###
Tweet: "Now it’s time for us as a country to move on. We need to remember that at the end of the day we’re on the same team: the American team. Both sides can do better at remembering that."
Sentiment: Right
###
Tweet: "This trial proved Trump’s high crimes against the Constitution. 43 senators put Trump first and failed the test of history. But history was also made with the largest bipartisan majority ever voting to convict a president. The rest of the story is ours to write."
Sentiment: Left
###
Tweet: "Too many Republican senators are comfortable hiding behind their misguided belief that trying a former president for his actions in office is unconstitutional — even as they refuse to answer whether actually inciting an insurrection is unconstitutional."
Sentiment: Left
###
Tweet: "I voted for convicting Trump because he should be held accountable for inciting a violent insurrection against the will of the people, Congress, and our Democracy. He should be held accountable for violating his oath of office and failing to support and defend our Constitution."
Sentiment: Left
###
Tweet: "After carefully listening to every minute of the presentations made by the House Managers and the former president’s legal team, I am convinced that the Senate does not have jurisdiction to render a judgement against the former president. Therefore, I voted not guilty."
Sentiment: Right
###
Tweet: "I was proud to pay a high starting wage in my own business, but a national $15 minimum wage would be a straight jacket on the economy. You're going to lose somewhere around 1 to 1.5 million jobs, especially in places hit hardest by COVID like restaurants."
Sentiment: Right
###
Tweet: "REPORT: A $15 min wage would Upwards arrow child care costs 21% on avg in America. Child care costs in Iowa are already skyrocketing. A $15 min wage is not the right solution for our working families. Expanding access to child care is—& I'll keep fighting to do so."
Sentiment: Right
###
Tweet: "The CDC & overwhelming scientific data say it’s safe for kids to go back to school. Schools have only used $4B of the $68B they’ve already been given by Congress. So why is Biden ok with unions keeping kids out of the classroom schools closed?"
Sentiment: Right
###
Tweet: "The facts and the evidence were overwhelming—former President Donald Trump lied for months to his supporters, summoned them to Washington, and incited a violent insurrection against our government and our democracy."

Sentiment: Left
###
Tweet: "I saw and heard firsthand the insurrectionists, at the behest of the former president, try to use fear and violence to overturn our democratic election."
Sentiment: Left
###
Tweet: "It is truly sad and dangerous that only 7 Republicans voted to convict a president who is promoting a Big Lie, conspiracy theories and violence, and is aggressively trying to destroy American democracy."
Sentiment: Left
###
Tweet: "The evidence presented was overwhelming: Donald Trump used the presidency to incite a violent insurrection against our democracy. A bipartisan majority of Senators voted today to send a message to future presidents: this kind of conduct is impeachable and disqualifying."
Sentiment: Left
###
Tweet: "'THERE'S A LOT OF BUYER'S REMORSE' -- Sen. @MarshaBlackburn on the reaction she's seen from Americans to President Biden's flurry of executive orders"
Sentiment: Right
###
Tweet: "Environmental justice is about following the golden rule. I co-founded @EJusticeCaucus with @SenDuckworth and @SenBooker because we have a moral obligation to provide Americans with clean air to breathe and clean water to drink, regardless of race or zip code. It's time to act."
Sentiment: Left
###
Tweet: "This #BlackHistoryMonth, we honor the legacy of Hazel Johnson, the “Mother of Environmental Justice” by continuing to fight for bold, robust environmental legislation. Every American has the right to breathe clean air, drink safe water, & live on uncontaminated land."
Sentiment: Left
###
Tweet: "''' + text + '''"
Sentiment:''',
        max_tokens=3)  # the label is a single short word
    print(response)
    return response['choices'][0]['text'].lower().replace('###', '').strip()
Example #6
0
 def __init__(self, API_KEY):
     if API_KEY is None:
         raise ValueError
     openai.api_key = API_KEY
     self.completion = openai.Completion()
     self.context = '''Human: Hello, who are you?