def get_likeliest_prediction(cls, model, context: str, options: List[str]):
    """Pick the option the model rates as the likeliest continuation.

    :param model: model exposing ``cond_log_prob``
    :param context: prefix string the options would follow
    :param options: list of candidate suffix strings
    :return: index into ``options`` of the highest-scoring candidate
    """
    per_option_token_scores = model.cond_log_prob(
        inputs=context,
        targets=options,
    )
    # An option's total log-probability is the sum over its token scores.
    option_totals = np.fromiter(
        (np.sum(token_scores) for token_scores in per_option_token_scores),
        dtype=float,
    )
    return np.argmax(option_totals)
# --- Text-generation demo: sample a continuation for each prompt. ---
prompts = ["These are the times that", "Stately, plump Buck Mulligan"]
responses = model.generate_text(inputs=prompts, max_length=32, stop_string=".")
for p, r in zip(prompts, responses):
    print(f"prompt: {p}")
    print(f"response: {r}")

# --- Single-prompt scoring demo. ---
# for testing, the prompt here has no trailing space, while the
# next scoring example has a trailing space
# NOTE: f-prefixes removed from literals with no placeholders (lint F541);
# the runtime strings are unchanged.
prompt = ("What color is the sky? Answer: blue\n"
          "What color is grass? Answer:")
choices = ("red", "blue", "green")
scores = model.cond_log_prob(inputs=prompt, targets=choices)
print("\n")
print(f"prompt:\n{prompt}")
print("scores:")
for c, s in zip(choices, scores):
    print(f" {c:>8}: {s:0.2f}")

# --- Batched scoring setup: one choice tuple per prompt. ---
prompts = [
    "What color is the sky? Answer: blue\n"
    "What color is grass? Answer: ",
    "What is 1+1? Answer: 2\n"
    "What is 2+2? Answer: ",
]
choices = [("red", "blue", "green"), ("1", "2", "3", "4")]