def setup_args():
    parser = train_args()
    profile = parser.add_argument_group('Profiler Arguments')
    profile.add_argument(
        '--use-nvprof',
        type='bool',
        default=False,
        help='If True, uses nvprof (might incur high overhead and assumes '
             'that the whole process is running inside nvprof), otherwise '
             'uses a custom CPU-only profiler (with negligible overhead). '
             'Default: False.',
    )
    profile.add_argument(
        '--trace-path',
        type=str,
        default=None,
        help='A path of the CUDA checkpoint. If specified, it will be left '
             'unmodified after profiling finishes, so it can be opened and '
             'inspected in nvvp. Otherwise it will be created in a temporary '
             'directory and removed after reading the results.',
    )
    profile.add_argument(
        '--debug',
        type='bool',
        default=False,
        help='If true, enter debugger at end of run.',
    )
    return parser
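# Hedged usage sketch (not part of the original file): one plausible way the
# --use-nvprof / --trace-path flags above could be consumed. `train_fn` is a
# hypothetical callable wrapping the training loop; torch.cuda.profiler and
# torch.autograd.profiler.emit_nvtx are real PyTorch APIs, but this wiring is
# an assumption, not the project's actual implementation.
import os
import tempfile

import torch


def run_with_nvprof_sketch(opt, train_fn):
    if not opt.get('use_nvprof'):
        # Run without the CUDA profiler.
        return train_fn()
    # Use the user-supplied trace path, or a throwaway temporary file.
    trace_path = opt.get('trace_path') or os.path.join(
        tempfile.mkdtemp(), 'trace.prof'
    )
    torch.cuda.profiler.init(trace_path, output_mode='key_value')
    with torch.cuda.profiler.profile():
        # emit_nvtx annotates autograd ops so they are visible in nvvp.
        with torch.autograd.profiler.emit_nvtx():
            return train_fn()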
def setup_args(parser=None):
    if parser is None:
        parser = ParlaiParser(True, True)
    parser = train_args(parser)
    profile = parser.add_argument_group('Profiler Arguments')
    profile.add_argument(
        '--torch',
        type='bool',
        default=False,
        help='If true, use the torch profiler. Otherwise use cProfile.',
    )
    profile.add_argument(
        '--torch-cuda',
        type='bool',
        default=False,
        help='If true, use the torch cuda profiler. Otherwise use cProfile.',
    )
    profile.add_argument(
        '--debug',
        type='bool',
        default=False,
        help='If true, enter debugger at end of run.',
    )
    return parser
def setup_args():
    parser = train_args()
    profile = parser.add_argument_group('Profiler Arguments')
    profile.add_argument(
        '--torch',
        type='bool',
        default=False,
        help='If true, use the torch profiler. Otherwise use cProfile.',
    )
    profile.add_argument(
        '--debug',
        type='bool',
        default=False,
        help='If true, enter debugger at end of run.',
    )
    return parser
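# Hedged usage sketch (not part of the original file): how the profiler flags
# defined above might drive an actual run. `TrainLoop` is assumed to come from
# parlai.scripts.train_model; cProfile, pdb, and torch.autograd.profiler are
# standard APIs. This is an illustrative sketch, not the project's real code.
import cProfile
import pdb

import torch

from parlai.scripts.train_model import TrainLoop


def profile_sketch(opt):
    use_cuda = opt.get('torch_cuda', False)
    if opt.get('torch') or use_cuda:
        # Torch profiler path: collect op-level timings (optionally CUDA).
        with torch.autograd.profiler.profile(use_cuda=use_cuda) as prof:
            TrainLoop(opt).train()
        print(prof.key_averages().table(sort_by='cpu_time_total'))
    else:
        # Default path: plain cProfile around the training loop.
        pr = cProfile.Profile()
        pr.enable()
        TrainLoop(opt).train()
        pr.disable()
        pr.print_stats(sort='cumulative')
    if opt.get('debug'):
        pdb.set_trace()


if __name__ == '__main__':
    profile_sketch(setup_args().parse_args())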