def main():
    random.seed(42)

    # Get command line arguments
    parser = ParlaiParser()
    parser.add_argument('-n', '--num-examples', default=10)
    parser.set_defaults(datatype='train:ordered')

    ImageLoader.add_cmdline_args(parser)
    opt = parser.parse_args()

    opt['no_cuda'] = False
    opt['gpu'] = 0
    # create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)

    # Show some example dialogs.
    with world:
        for k in range(int(opt['num_examples'])):
            world.parley()
            print(world.display() + '\n~~')
            if world.epoch_done():
                print('EPOCH DONE')
                break
Example #2
def main():
    random.seed(42)

    # Get command line arguments
    parser = ParlaiParser(True, True)
    parser.add_argument('-n', '--num-examples', default=100000000)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.set_defaults(datatype='valid')
    opt = parser.parse_args(print_args=False)

    eval_model(opt, parser)
Example #3
    def test_custom_special_tokens(self):
        from parlai.agents.hugging_face.dict import Gpt2DictionaryAgent
        from parlai.core.params import ParlaiParser

        parser = ParlaiParser(False, False)
        parser.set_defaults(gpt2_size="small",
                            add_special_tokens=True,
                            fp16=True)
        Gpt2DictionaryAgent.add_cmdline_args(parser, partial_opt=None)
        with testing_utils.tempdir() as tmpdir:
            opt = parser.parse_kwargs(dict_file=os.path.join(tmpdir, 'dict'))
            dict_agent = Gpt2DictionaryAgent(opt)
            oldtokens = dict_agent.txt2vec("Hi VOLDEMORT")
            prevlen = len(dict_agent)
            dict_agent.add_additional_special_tokens(["VOLDEMORT"])
            newlen = len(dict_agent)
            assert newlen == prevlen + 1
            tokens = dict_agent.txt2vec("Hi VOLDEMORT")
            assert tokens != oldtokens
            assert len(tokens) < len(oldtokens)
Example #4
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-micro', '--aggregate-micro', type='bool',
                        default=True,
                        help='If multitasking, average metrics over the '
                             'number of examples. If false, averages over the '
                             'number of tasks.')
    parser.add_argument('--metrics', type=str, default='all',
                        help='list of metrics to show/compute, e.g. '
                             'ppl, f1, accuracy, hits@1. '
                             'If `all` is specified [default] all are shown.')
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
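The setup_args helper above is typically paired with a small entry point that builds the parser, parses the command line, and hands the options to the evaluation routine. A minimal sketch, assuming eval_model comes from parlai.scripts.eval_model (the call shape mirrors Example #2 and may differ across ParlAI versions):

from parlai.scripts.eval_model import eval_model  # assumed import path; not shown in the snippet above


def main():
    # Build the parser defined in setup_args, parse CLI options, and run evaluation.
    parser = setup_args()
    opt = parser.parse_args()
    eval_model(opt, parser)  # same call shape as Example #2; newer ParlAI versions take only opt


if __name__ == '__main__':
    main()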
Example #5
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    #parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
    parser.add_argument('-ns', '--num-stored', type=int, default=10)
    parser.add_argument('-mdl', '--max-display-len', type=int, default=1000)
    parser.add_argument('--display-ignore-fields',
                        type=str,
                        default='agent_reply')
    parser.set_defaults(datatype='train:stream')
    parser.add_argument('-host', '--mongo-host', type=str)
    parser.add_argument('-port', '--mongo-port', type=int)
    parser.add_argument('-user', '--user-name', type=str)
    parser.add_argument('-pw', '--password', type=str)
    parser.add_argument('-col', '--collection-name', type=str)
    WorldLogger.add_cmdline_args(parser)
    return parser
Example #6
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Interactive chat with a model')
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument(
        '--display-prettify',
        type='bool',
        default=False,
        help='Set to use a prettytable when displaying '
        'examples with text candidates',
    )
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.set_defaults(model_file='models:convai2/kvmemnn/model')
    LocalHumanAgent.add_cmdline_args(parser)
    return parser
Example #7
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    # Get command line arguments
    parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
    parser.add_argument('-mdl', '--max-display-len', type=int, default=1000)
    parser.add_argument('--display-ignore-fields',
                        type=str,
                        default='agent_reply')
    parser.add_argument(
        '-v',
        '--display-verbose',
        default=False,
        action='store_true',
        help='If false, simple conversational view, does not show other message fields.',
    )

    parser.set_defaults(datatype='train:ordered')
    return parser
Example #8
 def add_cmdline_args(argparser: ParlaiParser):
     """
     Override to add init-fairseq-model arg.
     """
     TransformerGeneratorAgent.add_cmdline_args(argparser)
     group = argparser.add_argument_group('Bart Args')
     group.add_argument(
         '--init-fairseq-model',
         type=str,
         default=None,
         help='fairseq checkpoint for bart',
     )
     group.add_argument(
         '--output-conversion-path',
         type=str,
         default=None,
         help='where to save fairseq conversion',
     )
     argparser.set_defaults(dict_tokenizer='gpt2')
     argparser.set_defaults(**BART_ARGS)
def setup_args():
    """
    Set up conversion args.
    """
    parser = ParlaiParser()
    parser.add_argument(
        '-n',
        '--num-episodes',
        default=-1,
        type=int,
        help='Total number of episodes to convert, -1 to convert all examples',
    )
    parser.add_argument(
        '-of',
        '--outfile',
        default=None,
        type=str,
        help='Output file where to save, by default will be created in /tmp',
    )
    parser.add_argument('-s0id',
                        '--speaker-0-id',
                        type=str,
                        help='Speaker id of agent who speaks first')
    parser.add_argument(
        '-s1id',
        '--speaker-1-id',
        type=str,
        help='Speaker id of agent who speaks second',
    )
    parser.add_argument(
        '--prepended-context',
        type='bool',
        default=False,
        help='specify if the context is prepended to the first act',
    )
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
    parser.set_defaults(datatype='train:ordered')

    return parser
Example #10
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('--metrics',
                        type=str,
                        default="all",
                        help="list of metrics to show/compute, e.g. "
                        "ppl,f1,accuracy,hits@1."
                        "If 'all' is specified [default] all are shown.")
    parser.add_argument('-pb', '--perturb', type=str, default="None")
    parser.add_argument('-sft',
                        '--skip_first_turn',
                        type='bool',
                        default=False)
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None) -> ParlaiParser:
    # Get command line arguments
    if not parser:
        parser = ParlaiParser()
    parser.add_argument(
        '-n',
        '--num-examples',
        default=-1,
        type=int,
        help='Total number of exs to convert, -1 to convert all examples',
    )
    parser.add_argument(
        '-of',
        '--outfile',
        default=None,
        type=str,
        help='Output file where to save, by default will be created in /tmp',
    )
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.set_defaults(datatype='train:evalmode')
    return parser
Example #12
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True,
                              'compute statistics from model predictions')
    parser.add_pytorch_datateacher_args()
    DictionaryAgent.add_cmdline_args(parser)
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-ed',
        '--external-dict',
        type=str,
        default=None,
        help='External dictionary for stat computation',
    )
    parser.add_argument(
        '-fb',
        '--freq-bins',
        type=str,
        default='0,100,1000,10000',
        help='Bins boundaries for rare words stat',
    )
    parser.add_argument(
        '-dup',
        '--dump-predictions-path',
        type=str,
        default=None,
        help='Dump predictions into file',
    )
    parser.add_argument(
        '-cun',
        '--compute-unique',
        type=bool,
        default=True,
        help='Compute %% of unique responses from the model',
    )
    parser.set_defaults(datatype='valid', model='repeat_label')
    TensorboardLogger.add_cmdline_args(parser)
    return parser
Example #13
def setup_args(parser=None):
    # Get command line arguments
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    parser.add_argument(
        '-mf',
        '--model-file',
        type=str,
        default='/private/home/bhancock/metadialog/models/unknown.mdl',
    )
    parser.add_argument('-dfile', '--deploy-file', type=str, default='train_c')
    parser.add_argument('-sfile', '--supp-file', type=str, default='supp_c')
    parser.add_argument(
        '-cr',
        '--conversion-rate',
        type=float,
        default=1.0,
        help="The fraction of misses converted into new training data",
    )
    parser.add_argument(
        '-ca',
        '--conversion-acc',
        type=float,
        default=1.0,
        help="The fraction of converted data that have a correct label",
    )
    parser.set_defaults(bs=1)
    parser.set_defaults(ecands='inline')
    parser.set_defaults(datatype='valid')
    return parser
def main():
    parser = ParlaiParser(True, True)
    parser.set_defaults(batchsize=10,
                        sample=True,
                        max_seq_len=256,
                        beam_size=3,
                        annealing_topk=None,
                        annealing=0.6,
                        length_penalty=0.7)

    ConvAIWorld.add_cmdline_args(parser)
    TransformerAgent.add_cmdline_args(parser)
    opt = parser.parse_args()

    agent = TransformerAgent(opt)
    world = ConvAIWorld(opt, [agent])

    while True:
        try:
            world.parley()
        except Exception as e:
            print('Exception: {}'.format(e))
Example #15
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
Example #16
def main():
    random.seed(42)
    # Get command line arguments
    parser = ParlaiParser()
    parser.add_argument(
        '-n',
        '--num-examples',
        default=-1,
        type=int,
        help='Total number of exs to convert, -1 to convert all examples',
    )
    parser.add_argument(
        '-of',
        '--outfile',
        default=None,
        type=str,
        help='Output file where to save, by default will be created in /tmp',
    )
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.set_defaults(datatype='train:evalmode')
    opt = parser.parse_args()
    build_cands(opt)
Example #17
def main():
    random.seed(42)

    # Get command line arguments
    parser = ParlaiParser(True, True)
    parser.add_argument('-n', '--num-examples', default=10)
    # by default we want to display info about the validation set
    parser.set_defaults(datatype='valid')
    opt = parser.parse_args()

    # Create model and assign it to the specified task
    agent = create_agent(opt)
    world = create_task(opt, agent)

    # Show some example dialogs.
    with world:
        for k in range(int(opt['num_examples'])):
            world.parley()
            print(world.display() + "\n~~")
            if world.epoch_done():
                print("EPOCH DONE")
                break
    def run_actor(self, obs):
        MODEL_FILE = '/Volumes/Data/ParlAI/from_pretrained_wiki/wikiqa_tdifd'
        DB_PATH = '/Volumes/Data/ParlAI/from_pretrained_wiki/wikiqa_tdifd.db'
        TFIDF_PATH = '/Volumes/Data/ParlAI/from_pretrained_wiki/wikiqa_tdifd.tfidf'
        # keep things quiet
        logger.setLevel(ERROR)
        parser = ParlaiParser(True, True)
        parser.set_defaults(
            model_file=MODEL_FILE,
            interactive_mode=True,
        )
        opt = parser.parse_args([], print_args=False)
        opt['interactive_mode'] = True
        agent = create_agent(opt)
        train_world = create_task(opt, agent)
        # pass examples to dictionary
        while not train_world.epoch_done():
            train_world.parley()
        agent.observe(obs)
        reply = 'Hello'

        return reply
Example #19
 def add_cmdline_args(cls,
                      parser: ParlaiParser,
                      partial_opt: Optional[Opt] = None) -> ParlaiParser:
     agent = parser.add_argument_group('DialoGPT Args')
     agent.add_argument(
         '--gpt2-size',
         type=str,
         default='medium',
         choices=['small', 'medium', 'large'],
         help='Which size model to initialize.',
     )
     parser.set_defaults(
         delimiter='<|endoftext|>',
         history_add_global_end_token='<|endoftext|>',
         text_truncate=768,
         label_truncate=256,
         dict_maxexs=0,  # skip building dictionary
     )
     super().add_cmdline_args(parser, partial_opt=partial_opt)
     warn_once(
         'WARNING: this model is in beta and the API is subject to change.')
     return agent
Example #20
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Self chat with a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        type='bool',
        default=True,
        help='Create interactive version of task',
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=10,
        help="The number of dialogue turns before self chat ends.",
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help="Automatically seed conversation with messages from task dataset.",
    )
    parser.add_argument('--outfile', type=str, default='/tmp/selfchat.json')
    parser.add_argument('--format',
                        type=str,
                        default='json',
                        choices={'parlai', 'json'})
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser
Example #21
def main():
    # Get command line arguments
    parser = ParlaiParser(True, False)
    parser.set_defaults(datatype='train:ordered')

    opt = parser.parse_args()
    bsz = opt.get('batchsize', 1)
    opt['no_cuda'] = False
    opt['gpu'] = 0
    opt['num_epochs'] = 1
    # create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)

    logger = ProgressLogger(should_humanize=False)
    print("Beginning image extraction...")
    exs_seen = 0
    total_exs = world.num_examples()
    while not world.epoch_done():
        world.parley()
        exs_seen += bsz
        logger.log(exs_seen, total_exs)
    print("Finished extracting images")
Example #22
 def add_cmdline_args(cls,
                      parser: ParlaiParser,
                      partial_opt: Optional[Opt] = None) -> ParlaiParser:
     """
     Override to add init-fairseq-model arg.
     """
     super().add_cmdline_args(parser, partial_opt=partial_opt)
     group = parser.add_argument_group('Bart Args')
     group.add_argument(
         '--init-fairseq-model',
         type=str,
         default=None,
         help='fairseq checkpoint for bart',
     )
     group.add_argument(
         '--output-conversion-path',
         type=str,
         default=None,
         help='where to save fairseq conversion',
     )
     parser.set_defaults(dict_tokenizer='gpt2')
     parser.set_defaults(**BART_ARGS)
     return parser
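A classmethod like this is usually exercised by registering the agent's arguments on a ParlaiParser before parsing. A rough sketch, assuming the method lives on parlai.agents.bart.bart.BartAgent (that location is an assumption; it is not shown in the snippet above):

from parlai.core.params import ParlaiParser
from parlai.agents.bart.bart import BartAgent  # assumed concrete agent defining add_cmdline_args

parser = ParlaiParser(True, True)
parser = BartAgent.add_cmdline_args(parser, partial_opt=None)  # adds the 'Bart Args' group and BART defaults
opt = parser.parse_args(['--init-fairseq-model', '/path/to/checkpoint.pt'])
print(opt['init_fairseq_model'])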
Example #23
 def add_cmdline_args(cls,
                      parser: ParlaiParser,
                      partial_opt: Optional[Opt] = None) -> ParlaiParser:
     agent = parser.add_argument_group("Gpt2 Args")
     agent.add_argument("--model-name",
                        type=str,
                        default=None,
                        help="Any GPT-2 model names.")
     agent.add_argument(
         "--gpt2-size",
         type=str,
         default="small",
         choices=["small", "medium", "large", "xl", "distilgpt2"],
         help="Which size model to initialize.",
     )
     agent.add_argument(
         "--add-special-tokens",
         type="bool",
         default=True,
         help="Add special tokens (like PAD, etc.). If False, "
         "Can only use with batch size 1.",
     )
     agent.add_argument(
         "--add-start-token",
         type="bool",
         default=False,
         help="Add start tokens when finetuning.",
     )
     parser.set_defaults(
         text_truncate=768,
         label_truncate=256,
         dict_maxexs=0,  # skip building dictionary
     )
     super().add_cmdline_args(parser, partial_opt=partial_opt)
     warn_once(
         "WARNING: this model is in beta and the API is subject to change.")
     return agent
Example #24
 def add_cmdline_args(
     cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
 ) -> ParlaiParser:
     """
     Add CLI args.
     """
     super().add_cmdline_args(parser, partial_opt=partial_opt)
     parser = parser.add_argument_group('BERT Classifier Arguments')
     parser.add_argument(
         '--type-optimization',
         type=str,
         default='all_encoder_layers',
         choices=[
             'additional_layers',
             'top_layer',
             'top4_layers',
             'all_encoder_layers',
             'all',
         ],
         help='which part of the encoders do we optimize '
         '(defaults to all layers)',
     )
     parser.add_argument(
         '--add-cls-token',
         type='bool',
         default=True,
         help='add [CLS] token to text vec',
     )
     parser.add_argument(
         '--sep-last-utt',
         type='bool',
         default=False,
         help='separate the last utterance into a different '
         'segment with [SEP] token in between',
     )
     parser.set_defaults(dict_maxexs=0)  # skip building dictionary
     return parser
Example #25
 def add_cmdline_args(cls,
                      parser: ParlaiParser,
                      partial_opt: Optional[Opt] = None) -> ParlaiParser:
     """
     Add CLI args.
     """
     super().add_cmdline_args(parser, partial_opt=partial_opt)
     parser = parser.add_argument_group("BERT Classifier Arguments")
     parser.add_argument(
         "--type-optimization",
         type=str,
         default="all_encoder_layers",
         choices=[
             "additional_layers",
             "top_layer",
             "top4_layers",
             "all_encoder_layers",
             "all",
         ],
         help="which part of the encoders do we optimize "
         "(defaults to all layers)",
     )
     parser.add_argument(
         "--add-cls-token",
         type="bool",
         default=True,
         help="add [CLS] token to text vec",
     )
     parser.add_argument(
         "--sep-last-utt",
         type="bool",
         default=False,
         help="separate the last utterance into a different"
         "segment with [SEP] token in between",
     )
     parser.set_defaults(dict_maxexs=0)  # skip building dictionary
     return parser
Example #26
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    # Get command line arguments
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-world-logs',
        type='bool',
        default=False,
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Must also specify --report-filename.',
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    WorldLogger.add_cmdline_args(parser)
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
 def setup_args(cls):
     """
     Setup args.
     """
     parser = ParlaiParser(True, True, 'Index Dense Embs')
     parser.add_argument(
         '--embeddings-dir', type=str, help='directory of embeddings'
     )
     parser.add_argument(
         '--embeddings-name', type=str, default='', help='name of emb part'
     )
     parser.add_argument(
         '--partition-index',
         type='bool',
         default=False,
         help='specify True to partition indexing per file (useful when all files do not fit into memory)',
     )
     parser.add_argument(
         '--save-index-dir',
         type=str,
         help='directory in which to save index',
         default=None,
     )
     parser.add_argument(
         '--num-shards',
         type=int,
         default=1,
         help='how many workers to use to split up the work',
     )
     parser.add_argument(
         '--shard-id',
         type=int,
         help='shard id for this worker. should be between 0 and num_shards',
     )
     parser = RagAgent.add_cmdline_args(parser)
     parser.set_defaults(compressed_indexer_gpu_train=True)
     return parser
Example #28
 def add_cmdline_args(cls,
                      parser: ParlaiParser,
                      partial_opt: Optional[Opt] = None) -> ParlaiParser:
     add_common_args(parser)
     parser = parser.add_argument_group('Bert Ranker Arguments')
     parser.add_argument(
         '--biencoder-model-file',
         type=str,
         default=None,
         help='path to biencoder model. Default to model-file_bi',
     )
     parser.add_argument(
         '--biencoder-top-n',
         type=int,
         default=10,
         help=
         'default number of elements to keep from the biencoder response',
     )
     parser.add_argument(
         '--crossencoder-model-file',
         type=str,
         default=None,
         help='path to crossencoder model. Default to model-file_cross',
     )
     parser.add_argument(
         '--crossencoder-batchsize',
         type=int,
         default=-1,
         help=
         'crossencoder will be fed those many elements at train or eval time.',
     )
     parser.set_defaults(
         encode_candidate_vecs=True,
         dict_maxexs=0  # skip building dictionary
     )
     return parser
Example #29
def main():
    random.seed(42)

    # Get command line arguments
    parser = ParlaiParser(True, True)
    parser.add_argument('-n', '--num-examples', default=100000000)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.set_defaults(datatype='valid')
    opt = parser.parse_args()
    # Create model and assign it to the specified task
    agent = create_agent(opt)
    world = create_task(opt, agent)

    # Show some example dialogs:
    for k in range(int(opt['num_examples'])):
        world.parley()
        print("---")
        if opt['display_examples']:
            print(world.display() + "\n~~")
        print(world.report())
        if world.epoch_done():
            print("EPOCH DONE")
            break
    world.shutdown()
Example #30
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(
            True, True, 'Interactive chat with a model on the command line'
        )
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument(
        '--display-prettify',
        type='bool',
        default=False,
        help='Set to use a prettytable when displaying '
        'examples with text candidates',
    )
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        type='bool',
        default=True,
        help='Create interactive version of task',
    )
    parser.add_argument(
        '--save-world-logs',
        type='bool',
        default=False,
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Must also specify --report-filename.',
    )
    parser.set_defaults(interactive_mode=True, task='interactive')
    LocalHumanAgent.add_cmdline_args(parser)
    return parser
Example #31
    def add_cmdline_args(cls,
                         parser: ParlaiParser,
                         partial_opt: Optional[Opt] = None) -> ParlaiParser:
        """
        Add command-line arguments specifically for this agent.
        """
        AbstractReranker.add_cmdline_args(parser)
        parser.set_defaults(skip_generation=False)
        gen_agent = parser.add_argument_group("Generator Rerank Agent")
        gen_agent.add_argument(
            '--inference-strategies',
            type=str,
            default=None,
            help='comma-separated list of inference strategies. '
            'if specified, re-rank over several inference strategies',
        )
        gen_agent.add_argument(
            '--debug-mode',
            type='bool',
            default=False,
            help='specify to enable certain debugging procedures.',
        )

        return parser
Example #32
def main():
    random.seed(42)

    # Get command line arguments
    parser = ParlaiParser(True, True)
    parser.add_argument('-n', '--num-examples', default=100000000)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.set_defaults(datatype='valid')
    opt = parser.parse_args()
    # Create model and assign it to the specified task
    agent = create_agent(opt)
    world = create_task(opt, agent)

    # Show some example dialogs:
    for k in range(int(opt['num_examples'])):
        world.parley()
        print("---")
        if opt['display_examples']:
            print(world.display() + "\n~~")
        print(world.report())
        if world.epoch_done():
            print("EPOCH DONE")
            break
    world.shutdown()
Example #33
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True, 'Generate self-chats of a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument(
        '--display-add-fields',
        type=str,
        default='',
        help='Display these fields when verbose is off (e.g., "--display-add-fields label_candidates,beam_texts")',
    )
    parser.add_argument(
        '-st',
        '--selfchat-task',
        type='bool',
        default=True,
        help='Create a self chat version of the task',
    )
    parser.add_argument(
        '--num-self-chats', type=int, default=1, help='Number of self chats to run'
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=6,
        help='The number of dialogue turns before self chat ends',
    )
    parser.add_argument(
        '--seed-messages-from-task',
        type='bool',
        default=False,
        help='Automatically seed conversation with messages from task dataset.',
    )
    parser.add_argument(
        '--seed-messages-from-file',
        default=None,
        help='If specified, loads newline-separated strings from the file as conversation starters.',
    )
    parser.add_argument(
        '--outfile', type=str, default=None, help='File to save self chat logs'
    )
    parser.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.add_argument(
        '-pmf',
        '--partner-model-file',
        default=None,
        help='Define a different partner for self chat',
    )
    parser.add_argument(
        '--partner-opt-file',
        default=None,
        help='Path to file containing opts to override for partner',
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser, partial_opt=None)
    return parser