def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
    """Run a single training step by delegating to the base multilingual task.

    Bug fix: the original accepted ``ignore_grad`` but never forwarded it to
    ``MultilingualTranslationTask.train_step``, so callers requesting a
    gradient-free pass (e.g. dummy batches during OOM recovery) still
    accumulated gradients. It is now passed through as a keyword.

    Returns whatever the base implementation returns (loss, sample size,
    and logging outputs).
    """
    return MultilingualTranslationTask.train_step(
        self, sample, model, criterion, optimizer, ignore_grad=ignore_grad
    )
def add_args(parser):
    """Add task-specific arguments to the parser."""
    # fmt: off
    # Inherit all arguments of the base multilingual task first.
    MultilingualTranslationTask.add_args(parser)
    # Latent-depth specific options.
    parser.add_argument('--encoder-latent-layer', action='store_true',
                        help='latent layer selection in encoder')
    parser.add_argument('--decoder-latent-layer', action='store_true',
                        help='latent layer selection in decoder')
    parser.add_argument('--target-layers', default=-1, type=int,
                        help='number of effective layers to learn; -1 means no constraint')
    parser.add_argument('--sparsity-weight', default=0.0, type=float,
                        help='weight for sparsity loss')
    parser.add_argument('--share-weight', default=0.0, type=float,
                        help='weight for sharing loss')
    parser.add_argument('--soft-update', default=1, type=int,
                        help='number of updates with soft sampling')
    parser.add_argument('--anneal-updates', default=1, type=int,
                        help='number of updates to anneal the KL loss weight')
    parser.add_argument('--prior', default="uniform", type=str,
                        help='prior used for computing KL loss')
def setup_task(cls, args, **kwargs):
    """Build the task: prepare dictionaries via the base class, then construct."""
    dictionaries, is_training = MultilingualTranslationTask.prepare(args, **kwargs)
    return cls(args, dictionaries, is_training)
def add_args(parser):
    """Add task-specific arguments to the parser.

    This task introduces no extra options of its own; it simply registers
    everything the base multilingual translation task provides.
    """
    MultilingualTranslationTask.add_args(parser)
def inference_step(self, generator, models, sample, prefix_tokens=None):
    """Generate hypotheses for ``sample`` via the base multilingual task."""
    return MultilingualTranslationTask.inference_step(
        self, generator, models, sample, prefix_tokens=prefix_tokens
    )
def grad_denom(self, sample_sizes, criterion):
    """Compute the gradient denominator by delegating to the base task."""
    return MultilingualTranslationTask.grad_denom(
        self, sample_sizes, criterion
    )
def init_logging_output(self, sample):
    """Initialize the logging-output dict for ``sample`` via the base task.

    Bug fix: the original invoked the unbound base method as
    ``MultilingualTranslationTask.init_logging_output(sample)``, which binds
    ``sample`` as ``self`` and omits the real sample argument — a TypeError
    (or silent misuse) at call time. Every sibling delegation in this file
    (``grad_denom``, ``valid_step``, ``aggregate_logging_outputs``) passes
    ``self`` explicitly; this now does the same.
    """
    return MultilingualTranslationTask.init_logging_output(self, sample)
def aggregate_logging_outputs(self, logging_outputs, criterion):
    """Aggregate per-worker logging outputs using the base multilingual task."""
    return MultilingualTranslationTask.aggregate_logging_outputs(
        self, logging_outputs, criterion
    )
def valid_step(self, sample, model, criterion):
    """Run a single validation step by delegating to the base multilingual task."""
    return MultilingualTranslationTask.valid_step(
        self, sample, model, criterion
    )
def add_args(parser):
    """Add task-specific arguments to the parser."""
    # fmt: off
    # Register all base multilingual-translation options first.
    MultilingualTranslationTask.add_args(parser)

    # --- Speech related args ---
    # NOTE(review): ``default=False`` with no ``action``/``type`` means any
    # supplied value (even the string "False") is truthy — confirm whether
    # this should be ``action='store_true'`` before relying on it as a flag.
    parser.add_argument(
        "--dataset-from-json", default=False,
        help="whether to read the data from a JSON file or from indexed data containing "
             "the precomputed filterbanks")
    parser.add_argument(
        '--skip-normalization', action='store_true', default=False,
        help='if set, the input filterbanks are not normalized')
    parser.add_argument(
        '--legacy-audio-fix-lua-indexing', action='store_true', default=False,
        help='if set, the input filterbanks are subtracted 1 to remove +1 for lua indexing')

    # SpecAugment options.
    parser.add_argument('--specaugment', action='store_true', default=False)
    parser.add_argument(
        '--frequency-masking-pars', type=int, default=13,
        help="Maximum number of frequencies that can be masked")
    parser.add_argument(
        '--time-masking-pars', type=int, default=13,
        help="Maximum number of time steps that can be masked")
    parser.add_argument(
        '--frequency-masking-num', type=int, default=2,
        help="Number of masks to apply along the frequency dimension")
    parser.add_argument(
        '--time-masking-num', type=int, default=2,
        help="Number of masks to apply along the time dimension")
    parser.add_argument(
        '--specaugment-rate', type=float, default=1.0,
        help="Probability to apply specaugment to a spectrogram")

    # Time-stretch options.
    parser.add_argument(
        '--time-stretch', action='store_true',
        help="If set, activates time stretch on spectrograms")
    parser.add_argument(
        '--time-stretch-rate', type=float, default=1.0,
        help='Probability to apply time stretch to a spectrogram')
    parser.add_argument(
        '--time-stretch-w', type=int, default=1,
        help='Window size for time stretch')
    parser.add_argument(
        '--time-stretch-low', type=float, default=0.8,
        help='Low side of the stretch range')
    parser.add_argument(
        '--time-stretch-high', type=float, default=1.25,
        help='High side of the stretch range')
    # End of speech args

    parser.add_argument(
        '--langtok-merge-strategy', default='concat', type=str,
        choices=['concat', 'sum'], metavar='MRG',
        help='strategy to use when merging the language token with the input, '
             'it can be \'sum\' or \'concat\'')