def interactive_test(starting_temp=0.4, ending_temp=0.6, temp_incr=0.1):
    """Run interact_model over a sweep of sampling temperatures.

    The sweep works in integer percent steps so 0.1 increments stay exact
    (no float accumulation error). With the defaults this reproduces the
    original behavior: temperatures 0.4 and 0.5 (the end point is
    exclusive, like range()).

    Args:
        starting_temp: first temperature to try (inclusive).
        ending_temp: sweep limit (exclusive).
        temp_incr: step between successive temperatures.
    """
    # Convert to integer percentages (0.4 -> 40) to keep the loop exact.
    start = int(round(starting_temp * 100))
    stop = int(round(ending_temp * 100))
    step = int(round(temp_incr * 100))
    for pct in range(start, stop, step):
        # interact_model is defined elsewhere in this project.
        interact_model(temperature=pct / 100.0)
    print("Finished")
def handleMainProcess(toDel, c, strdata, jointpacketError, client):
    """Parse a newline-delimited request, run GPT inference, and reply.

    When jointpacketError carries a second element, the payload is rebuilt
    from it before parsing. After sending the reply, the originating
    request is removed from the global `clients` list.
    """
    if len(jointpacketError) == 2:
        strdata = "/m" + jointpacketError[1]

    # Request fields: [0]=prompt (after a 2-char prefix), [1]=fixed-seed
    # flag, [2]=seed, [3]=length, [4]=temperature, [5]=top_k, [6]=model.
    fields = strdata.split("\n")

    seed = int(fields[2])
    if fields[1] == "false":
        # Caller did not pin a seed; draw a fresh 32-bit one.
        seed = random.randint(0, 2**32 - 1)

    length = int(fields[3])
    temperature = float(fields[4])
    top_k = int(fields[5])

    # A requested length of 0 means "use the model default" (None).
    reply = gpt.interact_model(fields[6], fields[0][2:], seed, 1, 1,
                               None if length == 0 else length,
                               temperature, top_k)
    c.sendto(reply.encode('utf-8'), client.addr)

    if toDel in clients:
        request_id = toDel.id
        clients.remove(toDel)
        print("Removed request " + str(request_id))
async def on_message(message):
    """Discord handler: ignore own messages, support a memory-reset
    command, otherwise generate a model reply and update memory."""
    # Ignore the bot's own messages (JargoBot's ID).
    if message.author.id == 699023487188074562:
        return

    if message.content == "!obliviate":
        # Reset command: wipe the bot's memory and persist the empty state.
        await message.channel.send(
            "*Forgets everything*\nHmm... Must have hit my head...")
        Jargobot.obliviate()
        Jargobot.dump_memory()
        return

    await message.channel.trigger_typing()
    Jargobot.remember(message.content)
    response = interact_model(prompt=Jargobot.recollections).splitlines()[0]
    # Retry until the model yields something other than the end marker.
    while response == "<|endoftext|>":
        await message.channel.trigger_typing()
        print("TRYING AGAIN! GOT ENDOFTEXT.")
        response = interact_model(prompt=Jargobot.recollections).splitlines()[0]
    await message.channel.send(response)
    Jargobot.remember(response)
    Jargobot.dump_memory()
def generate_responses(text):
    """Ask the 355M model for ten candidate responses to *text*.

    The prompt is the input text followed by the ' ||| ' separator; the
    raw result from interact_model is printed and returned unchanged.
    """
    print('Generating responses')
    prompt = text + ' ||| '
    candidates = interact_model(prompt,
                                top_k=40,
                                temperature=0.8,
                                nsamples=10,
                                model_name='355M')
    print(candidates)
    return candidates
def get_text_messages(message):
    """Reply to a Telegram message with model output.

    Every word except the last (lower-cased) forms the prompt; the last
    word is parsed as the requested generation length.
    """
    tokens = message.text.lower().split()
    prompt = " ".join(tokens[:-1])
    reply = interact_model(input_text=prompt,
                           length=int(message.text.split()[-1]))
    bot.send_message(message.from_user.id, reply)
def main():
    """Pipeline: extract all comments, run the model on them, post results."""
    replies = interact_model(comments=extractAllComments())
    postCommentHandler(replies)
# NOTE(review): this fragment reads loss/best_loss/sess/data_test/saver/
# FLAGS/... from an enclosing scope not visible here — presumably it sits
# inside a training loop; confirm against the full file.
if loss < best_loss:
    # Validation improved: run an eval pass on the test set and checkpoint.
    best_loss = loss
    loss = train(sess, data_test, is_train=False)  # is_train=False => evaluation only
    print(" PPL on testing set:", loss)
    saver.save(sess, '%s/checkpoint' % train_dir,
               global_step=global_step.eval())
    print("saving parameters in %s" % train_dir)
else:
    # No improvement: switch to story generation instead of saving.
    if FLAGS.cond:
        # Conditional generation: prompts come from `dataset`.
        print("begin conditionally generating stories......")
        interact_model(
            sess=sess,
            enc=enc,
            PAD_ID=PAD_ID,
            hparams=hparams,
            context=context,
            dataset=data_test,  # Accept console input if `dataset` is set to None
            output_file_name="./inference_gpt2.txt",
            temperature=FLAGS.temperature,
            top_k=FLAGS.top_k)
    else:
        # Unconditional generation: sample from the model with no prompt.
        print("begin unconditionally generating stories......")
        sample_model(sess=sess,
                     enc=enc,
                     PAD_ID=PAD_ID,
                     hparams=hparams,
                     temperature=FLAGS.temperature,
                     top_k=FLAGS.top_k)
    print("end generating stories......")
import interactive_conditional_samples as ics
import json

# Prompt fed to the model; generated samples are serialized to sample.txt.
summary = 'this is a random phrase that will be used as a prompt for the model to use, in hope for it to generate more content that has both substance and fluff'
smpls = ics.interact_model(summary)
samplesJson = json.dumps(smpls)

# Fix: the original opened the file without a context manager (handle
# leaked on error) and shadowed the built-in name `file`.
with open('sample.txt', 'w') as out:
    out.write(samplesJson)
# Sweep every sampling strategy over its candidate hyperparameter values
# and generate samples for each (strategy, value) pair.
# NOTE(review): alpha_set / nuc_prob_set / flat_set / top_k_set are
# presumably initialized elsewhere (see the "some other variables are
# initialized below" comment) — on early iterations the ones not set by
# the current branch carry whatever value they last held; confirm.
for samp_strat, values in vals_dict.items():
    for val in values:
        # Bind the swept value to the parameter matching this strategy;
        # the other three parameters keep their previous values.
        if samp_strat=='tfs':
            alpha_set=val # this is actually now a probability threshold
        elif samp_strat=='n':
            nuc_prob_set=val
        elif samp_strat=='flat':
            flat_set=val
        else:
            top_k_set=val
        interact_model( # some other variables are initialized below
            general_path = '',
            alpha=alpha_set,
            nuc_prob=nuc_prob_set,
            flat_prob = flat_set,
            sampler=samp_strat, #n, k or tfs
            pre_prepared_prompts = True,
            num_prepared_prompts_wanted = 100, #5000
            model_name='774M', # '345M',
            seed=27,
            batch_size=25, # 500
            generated_length=150,
            prompt_length = 100,
            temperature=1,
            top_k=top_k_set,
            models_dir='../gpt-2/models',
        )