def setup_args(parser=None):
    """
    Build the parser for image feature loading/extraction.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with the Image Extraction option group added.
    """
    if parser is None:
        parser = ParlaiParser(True, False, 'Load/extract image features')
    parser.add_pytorch_datateacher_args()
    arg_group = parser.add_argument_group('Image Extraction')
    # FIX: the original used backslash line-continuations *inside* the help
    # strings, which embeds the source indentation whitespace into the text
    # shown to users; implicit string concatenation avoids that.
    arg_group.add_argument(
        '--dataset',
        type=str,
        default=None,
        help='Pytorch Dataset; if specified, will save '
        'the images in one hdf5 file according to how '
        'they are returned by the specified dataset',
    )
    arg_group.add_argument(
        '-at',
        '--attention',
        action='store_true',
        help='Whether to extract image features with attention '
        '(Note - this is specifically for the mlb_vqa model)',
    )
    arg_group.add_argument(
        '--use-hdf5-extraction',
        type='bool',
        default=False,
        help='Whether to extract images into an hdf5 dataset',
    )
    return parser
def setup_args(parser=None):
    """
    Build the parser for model evaluation.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with evaluation options added (datatype defaults
        to 'valid').
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    # FIX: the concatenated help fragments were missing separators and
    # rendered as "default,or give ..." and "bleuthe rouge metrics ...".
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None):
    """Return an evaluation parser whose datatype defaults to 'valid'."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None):
    """
    Build the parser for model evaluation.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with evaluation options added (datatype defaults
        to 'valid').
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    # FIX: the concatenated help fragments were missing a separator and
    # rendered as "hits@1.If `all` is specified ...".
    parser.add_argument(
        '--metrics',
        type=str,
        default='all',
        help='list of metrics to show/compute, e.g. '
        'ppl, f1, accuracy, hits@1. '
        'If `all` is specified [default] all are shown.',
    )
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None):
    """Build a parser whose default model is the local human agent."""
    if parser is None:
        parser = ParlaiParser()
    parser.add_pytorch_datateacher_args()
    # Force the interactive local-human agent as the model.
    parser.set_params(
        model='parlai.agents.local_human.local_human:LocalHumanAgent'
    )
    return parser
def setup_args(parser=None):
    """Build the parser for profiling a training run (torch or cProfile)."""
    if parser is None:
        parser = ParlaiParser(True, True, 'cProfile a training run')
    parser.add_pytorch_datateacher_args()
    parser = train_args(parser)
    prof = parser.add_argument_group('Profiler Arguments')
    # All three profiler switches are boolean flags defaulting to False.
    for flag, msg in (
        ('--torch', 'If true, use the torch profiler. Otherwise use cProfile.'),
        ('--torch-cuda', 'If true, use the torch cuda profiler. Otherwise use cProfile.'),
        ('--debug', 'If true, enter debugger at end of run.'),
    ):
        prof.add_argument(flag, type='bool', default=False, help=msg)
    return parser
def setup_args(parser=None, hidden=True):
    """
    Build the parser for dictionary construction.

    Adds the dictionary-loop options, then lets either a user-specified
    dictionary class (``--dict-class``) or the default DictionaryAgent
    contribute its own command-line args.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Build a dictionary.')
    parser.add_pytorch_datateacher_args()
    dict_loop = parser.add_argument_group('Dictionary Loop Arguments')
    dict_loop.add_argument(
        '--dict-maxexs',
        default=-1,
        type=int,
        help='max number of examples to build dict on',
        hidden=hidden,
    )
    dict_loop.add_argument(
        '--dict-include-valid',
        default=False,
        type='bool',
        help='Include validation set in dictionary building for task.',
        hidden=hidden,
    )
    dict_loop.add_argument(
        '--dict-include-test',
        default=False,
        type='bool',
        help='Include test set in dictionary building for task.',
        hidden=hidden,
    )
    dict_loop.add_argument(
        '-ltim', '--log-every-n-secs', type=float, default=2, hidden=hidden
    )
    # Peek at the partially-parsed args to find a custom dict class, if any.
    partial, _ = parser.parse_known_args(nohelp=True)
    dict_class = vars(partial).get('dict_class')
    if dict_class:
        str2class(dict_class).add_cmdline_args(parser)
    else:
        DictionaryAgent.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build the parser for computing statistics from model predictions.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser, defaulting to datatype 'valid' and the
        repeat_label model.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'compute statistics from model predictions')
    parser.add_pytorch_datateacher_args()
    DictionaryAgent.add_cmdline_args(parser)
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-ed', '--external-dict', type=str, default=None,
                        help='External dictionary for stat computation')
    parser.add_argument('-fb', '--freq-bins', type=str, default='0,100,1000,10000',
                        help='Bins boundaries for rare words stat')
    parser.add_argument('-dup', '--dump-predictions-path', type=str, default=None,
                        help='Dump predictions into file')
    # FIX: was ``type=bool`` — argparse's builtin bool treats every
    # non-empty string (including "False") as True; ParlaiParser's
    # registered 'bool' type parses true/false strings correctly, matching
    # every other boolean option in this file.
    parser.add_argument('-cun', '--compute-unique', type='bool', default=True,
                        help='Compute %% of unique responses from the model')
    parser.set_defaults(datatype='valid', model='repeat_label')
    TensorboardLogger.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """Build the parser for task linting/statistics over an ordered train set."""
    if parser is None:
        parser = ParlaiParser(True, False, 'Lint for ParlAI tasks')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '--agent',
        type=int,
        default=0,
        help='Use teacher (agent 0) or model (agent 1)',
        choices=[0, 1],
    )
    parser.add_argument(
        '--new_line_new_utt',
        type='bool',
        default=False,
        help='New lines treat substrings as separate utterances.',
    )
    parser.add_argument(
        '--ignore_tokens',
        type=str,
        default='',
        help='ignore tokens containings these substrings (comma-separated)',
    )
    parser.set_defaults(datatype='train:ordered')
    DictionaryAgent.add_cmdline_args(parser)
    return parser
def setup_args(parser=None):
    """Build the parser for linting a task while streaming the train set."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Lint for ParlAI tasks')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.set_defaults(datatype='train:stream')
    return parser
def setup_args(parser=None):
    """Build the parser for scanning a task for offensive language."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Check task for offensive language')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    # Walk the train set in order, echoing queries back via repeat_query.
    parser.set_defaults(datatype='train:ordered', model='repeat_query')
    return parser
def setup_args(parser=None):
    """Build the parser for displaying examples from a task."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Display data from a task')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ne', '--num_examples', type=int, default=10)
    parser.add_argument('-mdl', '--max_display_len', type=int, default=1000)
    parser.add_argument('--display_ignore_fields', type=str, default='agent_reply')
    parser.set_defaults(datatype='train:stream')
    return parser
def setup_args(parser=None):
    """
    Build the parser for model evaluation with optional report/world logging.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with evaluation, report, and logging options added.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-rp', '--report', type=str, default="/tmp/eval_model.json")
    parser.add_argument(
        '-rf',
        '--report-filename',
        type=str,
        default='',
        help='Saves a json file of the evaluation report either as an '
        'extension to the model-file (if begins with a ".") or a whole '
        'file path. Set to the empty string to not save at all.',
    )
    parser.add_argument(
        '--save-world-logs',
        type='bool',
        default=False,
        help='Saves a jsonl file containing all of the task examples and '
        'model replies. Must also specify --report-filename.',
    )
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    # FIX: the concatenated help fragments were missing separators and
    # rendered as "default,or give ..." and "bleuthe rouge metrics ...".
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    WorldLogger.add_cmdline_args(parser)
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None):
    """
    Build the parser for probing a model's internal representations.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with probing and evaluation options added
        (datatype 'valid', batchsize 256 by default).
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    # Probing command line arguments
    parser.add_argument(
        '--probe',
        type=str,
        default=None,
        choices=['word_embeddings', 'encoder_state', 'combined'],
        help="Specify the type of representations to generate for probing. "
        "See 'Probing Neural Dialog for Conversational Understanding' for more details."
    )
    parser.add_argument(
        '-t',
        '--tasks',
        type=str,
        nargs='+',
        required=True,
        help='Usage: -t trecquestion or -t trecquestion wnli multiwoz'
        '\nOnly compatible with names in probing/tasks')
    # Other command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    parser.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the '
        'number of examples. If false, averages over the '
        'number of tasks.',
    )
    # FIX: the concatenated help fragments were missing separators and
    # rendered as "default,or give ..." and "bleuthe rouge metrics ...".
    parser.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    parser.set_defaults(batchsize=256)
    return parser
def setup_args(parser=None):
    """
    Build the parser for model evaluation.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with evaluation options added (datatype defaults
        to 'valid').
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate a model')
    parser.add_pytorch_datateacher_args()
    # Get command line arguments
    parser.add_argument('-ne', '--num-examples', type=int, default=-1)
    parser.add_argument('-d', '--display-examples', type='bool', default=False)
    parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    # FIX: the concatenated help fragments were missing a separator and
    # rendered as "hits@1.If 'all' is specified ...".
    parser.add_argument('--metrics', type=str, default="all",
                        help="list of metrics to show/compute, e.g. "
                        "ppl,f1,accuracy,hits@1. "
                        "If 'all' is specified [default] all are shown.")
    TensorboardLogger.add_cmdline_args(parser)
    parser.set_defaults(datatype='valid')
    return parser
def setup_args():
    """Create the parser used when building a pytorch data file."""
    from parlai.core.params import ParlaiParser

    parser = ParlaiParser(True, True, 'Builds a pytorch data file.')
    parser.add_pytorch_datateacher_args()
    # Dictionary options are required for building the data file.
    return dict_setup(parser)
def setup_args(parser=None):
    """Build the parser for perplexity evaluation (valid split by default)."""
    if parser is None:
        parser = ParlaiParser(True, True, 'Evaluate perplexity')
    parser.add_pytorch_datateacher_args()
    parser.set_defaults(datatype='valid')
    return parser
def setup_args(parser=None) -> ParlaiParser:
    """
    Build the ParlAI parser, adding command line args if necessary.

    :param ParlaiParser parser:
        Preexisting parser to append options to. Will be created if needed.

    :returns:
        the ParlaiParser with CLI options added.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Train a model')
    parser.add_pytorch_datateacher_args()
    train = parser.add_argument_group('Training Loop Arguments')
    train.add_argument(
        '-et',
        '--evaltask',
        help=('task to use for valid/test (defaults to the '
              'one used for training if not set)'),
    )
    train.add_argument(
        '--eval-batchsize',
        type=int,
        hidden=True,
        help='Eval time batch size (defaults to same as -bs)',
    )
    train.add_argument('--display-examples', type='bool', default=False, hidden=True)
    train.add_argument('-eps', '--num-epochs', type=float, default=-1)
    train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
    train.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    train.add_argument(
        '-vtim',
        '--validation-every-n-secs',
        type=float,
        default=-1,
        help='Validate every n seconds. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-stim',
        '--save-every-n-secs',
        type=float,
        default=-1,
        help='Saves the model to model_file.checkpoint after '
        'every n seconds (default -1, never).',
    )
    train.add_argument(
        '-sval',
        '--save-after-valid',
        type='bool',
        default=False,
        help='Saves the model to model_file.checkpoint after '
        'every validation (default %(default)s).',
    )
    train.add_argument(
        '-veps',
        '--validation-every-n-epochs',
        type=float,
        default=-1,
        help='Validate every n epochs. Saves model to model_file '
        '(if set) whenever best val metric is found',
    )
    train.add_argument(
        '-vme',
        '--validation-max-exs',
        type=int,
        default=-1,
        hidden=True,
        help='max examples to use during validation (default '
        '-1 uses all)',
    )
    train.add_argument(
        '--short-final-eval',
        default=False,
        hidden=True,
        type='bool',
        help='If true, obeys --validation-max-exs in the final '
        'validation and test evaluations.',
    )
    train.add_argument(
        '-vp',
        '--validation-patience',
        type=int,
        default=10,
        help=('number of iterations of validation where result'
              ' does not improve before we stop training'),
    )
    train.add_argument(
        '-vmt',
        '--validation-metric',
        default='accuracy',
        help='key into report table for selecting best '
        'validation',
    )
    train.add_argument(
        '-vmm',
        '--validation-metric-mode',
        type=str,
        choices=['max', 'min'],
        help='how to optimize validation metric (max or min)',
    )
    train.add_argument(
        '-vcut',
        '--validation-cutoff',
        type=float,
        default=1.0,
        hidden=True,
        help='value at which training will stop if exceeded by '
        'training metric',
    )
    train.add_argument(
        '-dbf',
        '--dict-build-first',
        hidden=True,
        type='bool',
        default=True,
        help='build dictionary first before training agent',
    )
    train.add_argument(
        '-lfc',
        '--load-from-checkpoint',
        type='bool',
        default=False,
        hidden=True,
        help='load model from checkpoint if available',
    )
    # FIX: was missing type='bool' — without it argparse stores the raw
    # string, so '-vshare False' evaluated truthy downstream.
    train.add_argument(
        '-vshare',
        '--validation-share-agent',
        type='bool',
        default=False,
        hidden=True,
        help='use a shared copy of the agent for validation. '
        'this will eventually default to True, but '
        'currently defaults to False.',
    )
    train.add_argument(
        '-micro',
        '--aggregate-micro',
        type='bool',
        default=False,
        help='If multitasking, average metrics over the number of examples. '
        'If false, averages over the number of tasks.',
    )
    # FIX: the concatenated help fragments were missing separators and
    # rendered as "default,or give ..." and "bleuthe rouge metrics ...".
    train.add_argument(
        '-mcs',
        '--metrics',
        type=str,
        default='default',
        help='list of metrics to show/compute, e.g. all, default, '
        'or give a list split by , like '
        'ppl,f1,accuracy,hits@1,rouge,bleu. '
        'The rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
    )
    TensorboardLogger.add_cmdline_args(parser)
    parser = setup_dict_args(parser)
    return parser
def setup_args(parser=None):
    """
    Build the parser for a training run.

    :param parser: preexisting ParlaiParser to extend; created if None.
    :returns: the parser with the training-loop option group, tensorboard
        options, and dictionary options added.
    """
    if parser is None:
        parser = ParlaiParser(True, True, 'Train a model')
    parser.add_pytorch_datateacher_args()
    train = parser.add_argument_group('Training Loop Arguments')
    train.add_argument('-et', '--evaltask',
                       help=('task to use for valid/test (defaults to the '
                             'one used for training if not set)'))
    train.add_argument('--eval-batchsize', type=int, hidden=True,
                       help='Eval time batch size (defaults to same as -bs)')
    train.add_argument('--display-examples', type='bool', default=False,
                       hidden=True)
    train.add_argument('-eps', '--num-epochs', type=float, default=-1)
    train.add_argument('-ttim', '--max-train-time', type=float, default=-1)
    train.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
    train.add_argument(
        '-vtim', '--validation-every-n-secs', type=float, default=-1,
        help='Validate every n seconds. Saves model to model_file '
        '(if set) whenever best val metric is found')
    train.add_argument('-stim', '--save-every-n-secs', type=float, default=-1,
                       help='Saves the model to model_file.checkpoint after '
                       'every n seconds (default -1, never).')
    train.add_argument('-sval', '--save-after-valid', type='bool',
                       default=False,
                       help='Saves the model to model_file.checkpoint after '
                       'every validation (default %(default)s).')
    train.add_argument(
        '-veps', '--validation-every-n-epochs', type=float, default=-1,
        help='Validate every n epochs. Saves model to model_file '
        '(if set) whenever best val metric is found')
    train.add_argument('-vme', '--validation-max-exs', type=int, default=-1,
                       hidden=True,
                       help='max examples to use during validation (default '
                       '-1 uses all)')
    train.add_argument('--short-final-eval', default=False, hidden=True,
                       type='bool',
                       help='If true, obeys --validation-max-exs in the final '
                       'validation and test evaluations.')
    train.add_argument('-vp', '--validation-patience', type=int, default=10,
                       help=('number of iterations of validation where result'
                             ' does not improve before we stop training'))
    train.add_argument('-vmt', '--validation-metric', default='accuracy',
                       help='key into report table for selecting best '
                       'validation')
    train.add_argument('-vmm', '--validation-metric-mode', type=str,
                       choices=['max', 'min'],
                       help='how to optimize validation metric (max or min)')
    train.add_argument('-vcut', '--validation-cutoff', type=float, default=1.0,
                       hidden=True,
                       help='value at which training will stop if exceeded by '
                       'training metric')
    train.add_argument('-dbf', '--dict-build-first', hidden=True, type='bool',
                       default=True,
                       help='build dictionary first before training agent')
    train.add_argument('-lfc', '--load-from-checkpoint', type='bool',
                       default=False, hidden=True,
                       help='load model from checkpoint if available')
    # FIX: was missing type='bool' — without it argparse stores the raw
    # string, so '-vshare False' evaluated truthy downstream.
    train.add_argument('-vshare', '--validation-share-agent', type='bool',
                       default=False, hidden=True,
                       help='use a shared copy of the agent for validation. '
                       'this will eventually default to True, but '
                       'currently defaults to False.')
    TensorboardLogger.add_cmdline_args(parser)
    parser = setup_dict_args(parser)
    return parser