def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for generating model self-chats.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with all self-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Generate self-chats of a model')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('-d', '--display-examples', default=True, type='bool')
    parser.add_argument(
        '--display-ignore-fields',
        default='label_candidates,text_candidates',
        type=str,
        help='Do not display these fields',
    )
    parser.add_argument(
        '-st',
        '--selfchat-task',
        default=True,
        type='bool',
        help='Create a self chat version of the task',
    )
    parser.add_argument(
        '--num-self-chats', default=1, type=int, help='Number of self chats to run'
    )
    parser.add_argument(
        '--selfchat-max-turns',
        default=6,
        type=int,
        help='The number of dialogue turns before self chat ends',
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help='Automatically seed conversation with messages from task dataset.',
    )
    parser.add_argument(
        '--outfile', default=None, type=str, help='File to save self chat logs'
    )
    parser.add_argument(
        '--save-format',
        default='conversations',
        type=str,
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.add_argument(
        '-pmf',
        '--partner-model-file',
        default=None,
        help='Define a different partner for self chat',
    )
    parser.add_argument(
        '--partner-opt-file',
        default=None,
        help='Path to file containing opts to override for partner',
    )
    # Self chat runs as an interactive-mode world over the self_chat task.
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for model self chat with
    optional MongoDB logging credentials.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with all self-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Self chat with a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
    parser.add_argument('-nd', '--num-dialogues', type=int, default=10)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    # Mongo connection options; presumably used by a logging backend
    # elsewhere in this script — TODO confirm against caller.
    parser.add_argument('-host', '--mongo-host', type=str)
    parser.add_argument('-port', '--mongo-port', type=int)
    parser.add_argument('-user', '--user-name', type=str)
    parser.add_argument('-pw', '--password', type=str)
    parser.add_argument('-col', '--collection-name', type=str)
    parser.add_argument(
        '-mf1',
        '--model-file1',
        default=None,
        help='model file name for loading and saving models',
    )
    parser.add_argument(
        '-mf2',
        '--model-file2',
        default=None,
        help='model file name for loading and saving models',
    )
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        type='bool',
        default=True,
        help='Create interactive version of task',
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=10,
        help="The number of dialogue turns before self chat ends.",
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help="Automatically seed conversation with messages from task dataset.",
    )
    parser.add_argument('--outfile', type=str, default='/tmp/selfchat.json')
    # FIX: choices was a set literal, whose iteration order is
    # hash-randomized, making --help / usage output nondeterministic.
    # A list keeps membership semantics and a stable display order.
    parser.add_argument(
        '--format', type=str, default='json', choices=['json', 'parlai']
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the parser for command-line interactive chat.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with interactive-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(
            True, True, 'Interactive chat with a model on the command line'
        )
    parser.add_argument('-d', '--display-examples', default=False, type='bool')
    parser.set_defaults(interactive_mode=True, task='interactive')
    # The human side of the conversation plus conversation logging.
    LocalHumanAgent.add_cmdline_args(parser)
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the parser for model self chat, including Mongo
    connection options.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with all self-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Self chat with a model')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('-d', '--display-examples', default=True, type='bool')
    parser.add_argument('-n', '-ne', '--num-examples', default=10, type=int)
    parser.add_argument('-nd', '--num-dialogues', default=10, type=int)
    parser.add_argument('-ltim', '--log-every-n-secs', default=2, type=float)
    # Mongo connection options; presumably consumed by a logging backend
    # elsewhere in this script — TODO confirm against caller.
    parser.add_argument('-host', '--mongo-host', type=str)
    parser.add_argument('-port', '--mongo-port', type=int)
    parser.add_argument('-user', '--user-name', type=str)
    parser.add_argument('-pw', '--password', type=str)
    parser.add_argument(
        '--display-ignore-fields',
        default='label_candidates,text_candidates',
        type=str,
        help='Do not display these fields',
    )
    parser.add_argument(
        '-st',
        '--selfchat-task',
        default=True,
        type='bool',
        help='Create a self chat version of the task',
    )
    parser.add_argument(
        '--num-self-chats', default=1, type=int, help='Number of self chats to run'
    )
    parser.add_argument(
        '--selfchat-max-turns',
        default=6,
        type=int,
        help='The number of dialogue turns before self chat ends',
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help='Automatically seed conversation with messages from task dataset.',
    )
    parser.add_argument(
        '--outfile', default=None, type=str, help='File to save self chat logs'
    )
    parser.add_argument(
        '--save-format',
        default='conversations',
        type=str,
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for model evaluation.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with evaluation options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    # Get command line arguments
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--world-logs',
        type=str,
        default='',
        # FIX: the original implicit string concatenation rendered as
        # "world logs.Set to" — a missing space in user-visible help text.
        help='Saves a jsonl file of the world logs. '
        'Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        # FIX: the original concatenation rendered as "default,or give" and
        # "bleuthe rouge" — missing separators in user-visible help text.
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='Report micro-averaged metrics instead of macro averaged metrics.',
        recommended=False,
    )
    WorldLogger.add_cmdline_args(parser, partial_opt=None)
    TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
    # Evaluation defaults to the validation split.
    parser.set_params(datatype='valid')
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for model evaluation
    (older variant with pytorch data-teacher args and a --report path).

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with evaluation options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-rp', '--report', type=str, default="/tmp/eval_model.json")
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-world-logs',
        type='bool',
        default=False,
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Must also specify --report-filename.',
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        # FIX: the original concatenation rendered as "default,or give" and
        # "bleuthe rouge" — missing separators in user-visible help text.
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    WorldLogger.add_cmdline_args(parser)
    TensorboardLogger.add_cmdline_args(parser)
    # Evaluation defaults to the validation split.
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the parser for interactive chat with a model,
    including report and world-log saving options.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with interactive-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Interactive chat with a model')
    parser.add_argument('-d', '--display-examples', default=False, type='bool')
    parser.add_argument(
        '--display-prettify',
        default=False,
        type='bool',
        help='Set to use a prettytable when displaying '
        'examples with text candidates',
    )
    parser.add_argument(
        '--display-ignore-fields',
        default='label_candidates,text_candidates',
        type=str,
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        default=True,
        type='bool',
        help='Create interactive version of task',
    )
    parser.add_argument(
        '-rf',
        '--report-filename',
        default='',
        type=str,
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-world-logs',
        default=False,
        type='bool',
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Must also specify --report-filename.',
    )
    parser.add_argument(
        '--world-logs-format',
        default='parlai',
        type=str,
        choices=['jsonl', 'parlai', 'forever'],
        help='File format to save chat logs. (default parlai)',
    )
    parser.add_argument('-ltim', '--log-every-n-secs', default=2, type=float)
    parser.set_defaults(interactive_mode=True, task='interactive')
    # The human side of the conversation plus conversation logging.
    LocalHumanAgent.add_cmdline_args(parser)
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the parser for command-line interactive chat with
    optional conversation saving.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with interactive-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(
            True, True, 'Interactive chat with a model on the command line'
        )
    parser.add_argument('-d', '--display-examples', default=False, type='bool')
    parser.add_argument(
        '--display-prettify',
        default=False,
        type='bool',
        help='Set to use a prettytable when displaying '
        'examples with text candidates',
    )
    parser.add_argument(
        '--display-ignore-fields',
        default='label_candidates,text_candidates',
        type=str,
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        default=True,
        type='bool',
        help='Create interactive version of task',
    )
    parser.add_argument(
        '--outfile',
        default='',
        type=str,
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Set to the empty string to not save at all',
    )
    parser.add_argument(
        '--save-format',
        default='parlai',
        type=str,
        choices=['conversations', 'parlai'],
        help='Format to save logs in. conversations is a jsonl format, parlai is a text format.',
    )
    parser.set_defaults(interactive_mode=True, task='interactive')
    # The human side of the conversation plus conversation logging.
    LocalHumanAgent.add_cmdline_args(parser)
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for model self chat with
    jsonl/parlai output formats.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with all self-chat options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Self chat with a model')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('-d', '--display-examples', type='bool', default=True)
    parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=60)
    parser.add_argument(
        '--display-ignore-fields',
        type=str,
        default='label_candidates,text_candidates',
        help='Do not display these fields',
    )
    parser.add_argument(
        '-it',
        '--interactive-task',
        type='bool',
        default=True,
        help='Create interactive version of task',
    )
    parser.add_argument(
        '--selfchat-max-turns',
        type=int,
        default=10,
        help="The number of dialogue turns before self chat ends.",
    )
    parser.add_argument(
        '--seed-messages-from-task',
        action='store_true',
        help="Automatically seed conversation with messages from task dataset.",
    )
    parser.add_argument('--outfile', type=str, default='/tmp/selfchat.json')
    # FIX: choices was a set literal, whose iteration order is
    # hash-randomized, making --help / usage output nondeterministic.
    # A list keeps membership semantics and a stable display order.
    parser.add_argument(
        '--format', type=str, default='jsonl', choices=['jsonl', 'parlai']
    )
    parser.add_argument(
        '--indent', type=int, default=4, help='how much to indent jsonl string'
    )
    parser.set_defaults(interactive_mode=True, task='self_chat')
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the parser for displaying data from a task,
    streaming the training split by default.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with display-data options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    # Get command line arguments
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('-n', '-ne', '--num-examples', default=10, type=int)
    parser.add_argument('-ns', '--num-stored', default=10, type=int)
    parser.add_argument('-mdl', '--max-display-len', default=1000, type=int)
    parser.add_argument('--display-ignore-fields', default='agent_reply', type=str)
    parser.set_defaults(datatype='train:stream')
    # Mongo connection options; presumably consumed by a logging backend
    # elsewhere in this script — TODO confirm against caller.
    parser.add_argument('-host', '--mongo-host', type=str)
    parser.add_argument('-port', '--mongo-port', type=int)
    parser.add_argument('-user', '--user-name', type=str)
    parser.add_argument('-pw', '--password', type=str)
    parser.add_argument('-col', '--collection-name', type=str)
    WorldLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None) -> ParlaiParser:
    """
    Build the ParlAI parser, adding command line args if necessary.

    :param ParlaiParser parser:
        Preexisting parser to append options to. Will be created if needed.

    :returns:
        the ParlaiParser with CLI options added.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Train a model')
    train = parser.add_argument_group('Training Loop Arguments')
    train.add_argument(
        '-et',
        '--evaltask',
        help='task to use for valid/test (defaults to the one used for training)',
    )
    train.add_argument(
        '--final-extra-opt',
        type=str,
        default='',
        help="A '.opt' file that is used for final eval. Useful for setting skip-generation to false. 'datatype' must be included as part of the opt.",
    )
    train.add_argument(
        '--eval-batchsize',
        type=int,
        hidden=True,
        help='Eval time batch size (defaults to same as -bs)',
    )
    train.add_argument(
        '--eval-dynamic-batching',  # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367
        default=None,
        type='nonestr',
        # FIX: choices was a set literal — hash-randomized iteration order
        # makes --help output nondeterministic; a list is stable.
        choices=[None, 'off', 'full', 'batchsort'],
        help=(
            'Set dynamic batching at evaluation time. Set to off for '
            'train-only dynamic batching. Set to none (default) to use same '
            'setting as --dynamic-batching.'
        ),
    )
    train.add_argument(
        '--num-workers',
        default=0,
        type=int,
        help='Number of background workers (training only)',
    )
    train.add_argument('--display-examples', type='bool', default=False, hidden=True)
    # Stopping criteria: epochs, wall-clock time, or model-update steps
    # (-1 disables each).
    train.add_argument('-eps', '--num-epochs', type=float, default=-1)
    train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
    train.add_argument(
        '-tstep',
        '--max-train-steps',
        '--max-lr-steps',
        type=int,
        default=-1,
        help='End training after n model updates',
    )
    train.add_argument('-ltim', '--log-every-n-secs', type=float, default=-1)
    train.add_argument(
        '-lstep',
        '--log-every-n-steps',
        type=int,
        default=50,
        help='Log every n training steps',
    )
    train.add_argument(
        '-vtim',
        '--validation-every-n-secs',
        type=float,
        default=-1,
        # FIX: help text contained a stray embedded line break
        # ("seconds. \nSaves") — reconstructed as a single sentence.
        help='Validate every n seconds. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-vstep',
        '--validation-every-n-steps',
        type=int,
        default=-1,
        help='Validate every n training steps. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-stim',
        '--save-every-n-secs',
        type=float,
        default=-1,
        help='Saves the model to model_file.checkpoint after '
        'every n seconds (default -1, never).',
    )
    train.add_argument(
        '-sval',
        '--save-after-valid',
        type='bool',
        default=False,
        help='Saves the model to model_file.checkpoint after '
        'every validation (default %(default)s).',
    )
    train.add_argument(
        '-veps',
        '--validation-every-n-epochs',
        type=float,
        default=-1,
        help='Validate every n epochs. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-vme',
        '--validation-max-exs',
        type=int,
        default=-1,
        hidden=True,
        help='max examples to use during validation (default -1 uses all)',
    )
    train.add_argument(
        '--short-final-eval',
        default=False,
        hidden=True,
        type='bool',
        help='If true, obeys --validation-max-exs in the final '
        'validation and test evaluations.',
    )
    train.add_argument(
        '-vp',
        '--validation-patience',
        type=int,
        default=10,
        help=(
            'number of iterations of validation where result'
            ' does not improve before we stop training'
        ),
    )
    train.add_argument(
        '-vmt',
        '--validation-metric',
        default='accuracy',
        help='key into report table for selecting best validation',
    )
    train.add_argument(
        '-vmm',
        '--validation-metric-mode',
        type=str,
        choices=['max', 'min'],
        # FIX: help text contained a stray embedded line break
        # ("i.e. \nmaximize") — reconstructed as a single sentence.
        help='the direction in which to optimize the validation metric, '
        'i.e. maximize or minimize',
    )
    train.add_argument(
        '-vcut',
        '--validation-cutoff',
        type=float,
        default=1.0,
        hidden=True,
        help='value at which training will stop if exceeded by metric',
    )
    train.add_argument(
        '-lfc',
        '--load-from-checkpoint',
        type='bool',
        default=True,
        hidden=True,
        help='load model from checkpoint if available',
    )
    train.add_argument(
        '-vshare',
        '--validation-share-agent',
        default=False,
        hidden=True,
        help='use a shared copy of the agent for validation. '
        'this will eventually default to True, but '
        'currently defaults to False.',
    )
    train.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        # FIX: the original concatenation rendered as "default,or give" and
        # "bleuthe rouge" — missing separators in user-visible help text.
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    train.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='Report micro-averaged metrics instead of macro averaged metrics.',
        recommended=False,
    )
    train.add_argument(
        '--world-logs',
        type=str,
        default='',
        # FIX: the original concatenation rendered as "world logs.Set to" —
        # a missing space in user-visible help text.
        help='Saves a jsonl file of the world logs. '
        'Set to the empty string to not save at all.',
    )
    train.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
    )
    WorldLogger.add_cmdline_args(parser, partial_opt=None)
    TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
    WandbLogger.add_cmdline_args(parser, partial_opt=None)
    parser = setup_dict_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build (or extend) the command-line parser for model evaluation,
    including optional AUC computation.

    :param parser: optional preexisting ParlaiParser to append options to;
        a fresh one is created when None.
    :return: the parser with evaluation options registered.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    # Get command line arguments
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--world-logs',
        type=str,
        default='',
        # FIX: the original implicit string concatenation rendered as
        # "world logs.Set to" — a missing space in user-visible help text.
        help='Saves a jsonl file of the world logs. '
        'Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-format',
        type=str,
        default='conversations',
        choices=['conversations', 'parlai'],
    )
    parser.add_argument(
        '--area-under-curve-digits',
        '-auc',
        type=int,
        default=-1,
        help='a positive number indicates to calculate the area under the roc curve and it also determines how many decimal digits of the predictions to keep (higher numbers->more precise); also used to determine whether or not to calculate the AUC metric',
    )
    parser.add_argument(
        '--area-under-curve-class',
        '-auclass',
        type=str,
        default=None,
        nargs='*',
        help='the name(s) of the class to calculate the auc for',
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        # FIX: the original concatenation rendered as "default,or give" and
        # "bleuthe rouge" — missing separators in user-visible help text.
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like ppl,f1,accuracy,hits@1,rouge,bleu; '
        'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='Report micro-averaged metrics instead of macro averaged metrics.',
        recommended=False,
    )
    WorldLogger.add_cmdline_args(parser, partial_opt=None)
    TensorboardLogger.add_cmdline_args(parser, partial_opt=None)
    # Evaluation defaults to the validation split.
    parser.set_params(datatype='valid')
    return parser
type=str, default='default', help='list of metrics to show/compute, e.g. all, default,' 'or give a list split by , like ' 'ppl,f1,accuracy,hits@1,rouge,bleu' 'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l', ) parser.add_argument( '-micro', '--aggregate-micro', type='bool', default=False, help='Report micro-averaged metrics instead of macro averaged metrics.', recommended=False, ) WorldLogger.add_cmdline_args(parser) >>>>>>> b49eba4519856f6ab83b869b168c6af99863df47 TensorboardLogger.add_cmdline_args(parser) parser.set_params(datatype='valid') return parser def _save_eval_stats(opt, report): report_fname = opt['report_filename'] if report_fname == '': return if report_fname.startswith('.'): report_fname = opt['model_file'] + report_fname json_serializable_report = report for k, v in report.items():