def main():
    import argparse

    global model, spect_parser, decoder, args
    parser = argparse.ArgumentParser(description='DeepSpeech transcription server')
    parser.add_argument('--host', type=str, default='0.0.0.0', help='Host to be used by the server')
    parser.add_argument('--port', type=int, default=8888, help='Port to be used by the server')
    parser = add_inference_args(parser)
    parser = add_decoder_args(parser)
    args = parser.parse_args()
    logging.getLogger().setLevel(logging.DEBUG)

    logging.info('Setting up server...')
    torch.set_grad_enabled(False)
    model = DeepSpeech.load_model(args.model_path)
    if args.cuda:
        model.cuda()
    model.eval()

    labels = DeepSpeech.get_labels(model)
    audio_conf = DeepSpeech.get_audio_conf(model)

    if args.decoder == "beam":
        from decoder import BeamCTCDecoder
        decoder = BeamCTCDecoder(labels, lm_path=args.lm_path, alpha=args.alpha, beta=args.beta,
                                 cutoff_top_n=args.cutoff_top_n, cutoff_prob=args.cutoff_prob,
                                 beam_width=args.beam_width, num_processes=args.lm_workers)
    else:
        decoder = GreedyDecoder(labels, blank_index=labels.index('_'))

    spect_parser = SpectrogramParser(audio_conf, normalize=True)
    logging.info('Server initialised')
    app.run(host=args.host, port=args.port, debug=True, use_reloader=False)
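A minimal client sketch for exercising the server above. The Flask route and upload field name are not visible in this fragment, so '/transcribe' and 'file' below are assumptions; adjust them to whatever route the app actually registers.

import requests

# Hypothetical client: assumes the app exposes POST /transcribe accepting a
# multipart 'file' field. Both names are guesses, not confirmed by this file.
with open('audio.wav', 'rb') as f:
    response = requests.post('http://0.0.0.0:8888/transcribe', files={'file': f})
print(response.json())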
def __init__(self, model_path):
    """
    :param model_path: Path to the trained DeepSpeech model checkpoint to load.
    """
    assert os.path.exists(model_path), "Cannot find model here {}".format(model_path)
    self.deep_speech_model = DeepSpeech.load_model(model_path)
    self.deep_speech_model.eval()
    labels = DeepSpeech.get_labels(self.deep_speech_model)
    self.audio_conf = DeepSpeech.get_audio_conf(self.deep_speech_model)
    self.decoder = GreedyDecoder(labels)
    self.parser = SpectrogramParser(self.audio_conf, normalize=True)
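A usage sketch, assuming the __init__ above belongs to a small wrapper class (called Transcriber here purely for illustration; the real class name is not visible in this fragment):

# Hypothetical usage; 'Transcriber' stands in for the enclosing class.
transcriber = Transcriber(model_path='models/deepspeech_final.pth')
# transcriber.parser, transcriber.deep_speech_model and transcriber.decoder
# then form a parse -> forward -> greedy-decode pipeline.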
"specified") no_decoder_args.add_argument('--output-path', default=None, type=str, help="Where to save raw acoustic output") parser = add_decoder_args(parser) args = parser.parse_args() if __name__ == '__main__': torch.set_grad_enabled(False) model = DeepSpeech.load_model(args.model_path) device = torch.device("cuda" if args.cuda else "cpu") model = model.to(device) model.eval() labels = DeepSpeech.get_labels(model) audio_conf = DeepSpeech.get_audio_conf(model) if args.decoder == "beam": from decoder import BeamCTCDecoder decoder = BeamCTCDecoder(labels, lm_path=args.lm_path, alpha=args.alpha, beta=args.beta, cutoff_top_n=args.cutoff_top_n, cutoff_prob=args.cutoff_prob, beam_width=args.beam_width, num_processes=args.lm_workers) elif args.decoder == "greedy": decoder = GreedyDecoder(labels, blank_index=labels.index('_'))
    if cuda:
        spect = spect.cuda()
    input_sizes = torch.IntTensor([spect.size(3)]).int()
    out, output_sizes = model(spect, input_sizes)
    decoded_output, decoded_offsets = decoder.decode(out, output_sizes)
    return decoded_output, decoded_offsets


if __name__ == '__main__':
    torch.set_grad_enabled(False)
    model = DeepSpeech.load_model(args.model_path)
    if args.cuda:
        model.cuda()
    model.eval()

    labels = DeepSpeech.get_labels(model)
    audio_conf = DeepSpeech.get_audio_conf(model)

    if args.decoder == "beam":
        from decoder import BeamCTCDecoder
        decoder = BeamCTCDecoder(labels, lm_path=args.lm_path, alpha=args.alpha, beta=args.beta,
                                 cutoff_top_n=args.cutoff_top_n, cutoff_prob=args.cutoff_prob,
                                 beam_width=args.beam_width, num_processes=args.lm_workers)
    else:
        decoder = GreedyDecoder(labels, blank_index=labels.index('_'))

    parser = SpectrogramParser(audio_conf, normalize=True)
    decoded_output, decoded_offsets = transcribe(args.audio_path, parser, model, decoder, args.cuda)
    print(json.dumps(decode_results(model, decoded_output, decoded_offsets)))
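The shape convention assumed by the code above: spect is 4-D, (batch, channel, freq, time), so spect.size(3) is the number of time frames fed to the network. A quick standalone check with a dummy spectrogram (161 frequency bins is just an example value):

import torch

spect = torch.randn(1, 1, 161, 300)  # dummy spectrogram: 161 freq bins, 300 frames
input_sizes = torch.IntTensor([spect.size(3)]).int()
print(input_sizes)  # tensor([300], dtype=torch.int32)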
    try:
        os.makedirs(save_folder)
    except OSError as e:
        if e.errno == errno.EEXIST:
            print('Model Save directory already exists.')
        else:
            raise
    criterion = CTCLoss()

    avg_loss, start_epoch, start_iter = 0, 0, 0
    if args.continue_from:  # Starting from a previous model
        print("Loading checkpoint model %s" % args.continue_from)
        package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
        model_teacher = DeepSpeech.load_model_package(package)
        labels = DeepSpeech.get_labels(model_teacher)
        audio_conf = DeepSpeech.get_audio_conf(model_teacher)
        parameters_teacher = model_teacher.parameters()
        optimizer_teacher = torch.optim.SGD(parameters_teacher, lr=args.lr,
                                            momentum=args.momentum, nesterov=True)
        # load student model with pretrained model
        '''
        model_student = DeepSpeech.load_model_package(package)
        parameters_student = model_student.parameters()
        optimizer_student = torch.optim.SGD(parameters_student, lr=args.lr,
                                            momentum=args.momentum, nesterov=True)
        '''
        # restart student model from scratch
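The teacher/student split above points at knowledge distillation, but no distillation objective appears in this fragment. A minimal sketch of one common choice, KL divergence between temperature-softened teacher and student frame distributions; this is an assumption about intent, not code from this repo:

import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, temperature=2.0):
    # Soften both distributions with a temperature, then measure how far the
    # student is from the teacher; the T^2 factor keeps gradient magnitudes
    # comparable across temperatures (Hinton et al.'s convention).
    t = temperature
    student_log_probs = F.log_softmax(student_logits / t, dim=-1)
    teacher_probs = F.softmax(teacher_logits / t, dim=-1)
    return F.kl_div(student_log_probs, teacher_probs, reduction='batchmean') * (t * t)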
def main():
    global args, train_logger, test_logger
    args = options.parse_args()

    os.makedirs(args.log_dir)
    test_logger = Logger(os.path.join(args.log_dir, 'test.log'))
    with open(os.path.join(args.log_dir, 'config.log'), 'w') as f:
        f.write(args.config_str)
    if not args.evaluate:
        os.makedirs(args.checkpoint_dir)
        train_logger = Logger(os.path.join(args.log_dir, 'train.log'))

    loss_results, cer_results = torch.FloatTensor(args.epochs), torch.FloatTensor(args.epochs)
    if args.visdom:
        from visdom import Visdom
        viz = Visdom()
        opts = dict(title=args.experiment_id, ylabel='', xlabel='Epoch', legend=['Loss', 'CER'])
        viz_window = None  # fixed: was 'viz_windows', but the code below checks 'viz_window'
        epochs = torch.arange(0, args.epochs)

    if args.resume:
        print('Loading checkpoint model %s' % args.resume)
        checkpoint = torch.load(args.resume)
        model = DeepSpeech.load_model_checkpoint(checkpoint)
        model = torch.nn.DataParallel(model, device_ids=list(range(args.nGPU))).cuda()
        labels = DeepSpeech.get_labels(model)
        audio_conf = DeepSpeech.get_audio_conf(model)
        parameters = model.parameters()
        optimizer = torch.optim.SGD(parameters, lr=args.lr,
                                    momentum=args.momentum, nesterov=True)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = int(checkpoint.get('epoch', 0))  # Index starts at 0 for training
        loss_results, cer_results = checkpoint['loss_results'], checkpoint['cer_results']
        if args.epochs > loss_results.numel():
            loss_results.resize_(args.epochs)
            cer_results.resize_(args.epochs)
            loss_results[start_epoch:].zero_()
            cer_results[start_epoch:].zero_()
        # Add previous scores to visdom graph
        if args.visdom and loss_results is not None:
            x_axis = epochs[0:start_epoch]
            y_axis = torch.stack((loss_results[0:start_epoch], cer_results[0:start_epoch]), dim=1)
            viz_window = viz.line(
                X=x_axis,
                Y=y_axis,
                opts=opts,
            )
    else:
        start_epoch = args.start_epoch
        with open(args.labels_path) as label_file:
            labels = str(''.join(json.load(label_file)))

        audio_conf = dict(sample_rate=args.sample_rate,
                          window_size=args.window_size,
                          window_stride=args.window_stride,
                          window=args.window,
                          noise_dir=args.noise_dir,
                          noise_prob=args.noise_prob,
                          noise_levels=(args.noise_min, args.noise_max))
        model = DeepSpeech(rnn_hidden_size=args.hidden_size,
                           nb_layers=args.hidden_layers,
                           labels=labels,
                           rnn_type=supported_rnns[args.rnn_type],
                           audio_conf=audio_conf,
                           bidirectional=not args.look_ahead)
        model = torch.nn.DataParallel(model, device_ids=list(range(args.nGPU))).cuda()
        parameters = model.parameters()
        optimizer = torch.optim.SGD(parameters, lr=args.lr,
                                    momentum=args.momentum, nesterov=True)

    # define loss function (criterion) and decoder
    best_cer = None
    criterion = CTCLoss()
    decoder = GreedyDecoder(labels)

    # define dataloaders
    if not args.evaluate:
        train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.train_manifest,
                                           labels=labels, normalize=True, augment=args.augment)
        train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
        train_loader = AudioDataLoader(train_dataset,
                                       num_workers=args.num_workers, batch_sampler=train_sampler)
        if not args.in_order and start_epoch != 0:
            print("Shuffling batches for the following epochs")
            train_sampler.shuffle()
    val_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.val_manifest,
                                     labels=labels, normalize=True, augment=False)
    val_loader = AudioDataLoader(val_dataset, batch_size=args.batch_size,
                                 num_workers=args.num_workers)

    print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    if args.evaluate:
        validate(val_loader, model, decoder, 0)
        return

    for epoch in range(start_epoch, args.epochs):
        avg_loss = train(train_loader, train_sampler, model, criterion, optimizer, epoch)
        cer = validate(val_loader, model, decoder, epoch)

        loss_results[epoch] = avg_loss
        cer_results[epoch] = cer

        adjust_learning_rate(optimizer)

        is_best = False
        if best_cer is None or best_cer > cer:
            print('Found better validated model')
            best_cer = cer
            is_best = True
        save_checkpoint(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch,
                                             loss_results=loss_results, cer_results=cer_results),
                        is_best, epoch)

        if not args.in_order:
            print("Shuffling batches...")
            train_sampler.shuffle()

        if args.visdom:
            x_axis = epochs[0:epoch + 1]
            y_axis = torch.stack((loss_results[0:epoch + 1], cer_results[0:epoch + 1]), dim=1)
            if viz_window is None:
                viz_window = viz.line(
                    X=x_axis,
                    Y=y_axis,
                    opts=opts,
                )
            else:
                viz.line(
                    X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1),  # Visdom fix
                    Y=y_axis,
                    win=viz_window,
                    update='replace',
                )
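adjust_learning_rate is called in the loop above but not defined in this fragment. A plausible implementation, assuming the same constant-factor annealing that train_main below applies explicitly:

def adjust_learning_rate(optimizer, anneal_factor=1.1):
    # Divide every parameter group's learning rate by a constant factor,
    # mirroring the explicit annealing in train_main; the factor is a guess.
    for param_group in optimizer.param_groups:
        param_group['lr'] /= anneal_factor
        print('Learning rate annealed to: {lr:.6f}'.format(lr=param_group['lr']))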
def train_main(args):
    args.distributed = args.world_size > 1
    main_proc = True
    if args.distributed:
        if args.gpu_rank:
            torch.cuda.set_device(int(args.gpu_rank))
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
        main_proc = args.rank == 0  # Only the first proc should save models
    save_folder = args.save_folder

    loss_results, cer_results, wer_results = torch.Tensor(args.epochs), torch.Tensor(args.epochs), torch.Tensor(
        args.epochs)
    best_wer = None
    if args.visdom and main_proc:
        from visdom import Visdom
        viz = Visdom()
        opts = dict(title=args.id, ylabel='', xlabel='Epoch', legend=['Loss', 'WER', 'CER'])
        viz_window = None
        epochs = torch.arange(1, args.epochs + 1)
    if args.tensorboard and main_proc:
        os.makedirs(args.log_dir, exist_ok=True)
        from tensorboardX import SummaryWriter
        tensorboard_writer = SummaryWriter(args.log_dir)
    os.makedirs(save_folder, exist_ok=True)

    avg_loss, start_epoch, start_iter = 0, 0, 0
    if args.continue_from:  # Starting from a previous model
        print("Loading checkpoint model %s" % args.continue_from)
        package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
        model = DeepSpeech.load_model_package(package)
        labels = DeepSpeech.get_labels(model)
        audio_conf = DeepSpeech.get_audio_conf(model)
        parameters = model.parameters()
        optimizer = torch.optim.SGD(parameters, lr=args.lr,
                                    momentum=args.momentum, nesterov=True)
        if not args.finetune:  # Don't want to restart training
            if args.cuda:
                model.cuda()
            optimizer.load_state_dict(package['optim_dict'])
            start_epoch = int(package.get('epoch', 1)) - 1  # Index starts at 0 for training
            start_iter = package.get('iteration', None)
            if start_iter is None:
                start_epoch += 1  # We saved the model after the epoch finished, start at the next epoch.
                start_iter = 0
            else:
                start_iter += 1
            avg_loss = int(package.get('avg_loss', 0))
            loss_results, cer_results, wer_results = package['loss_results'], package[
                'cer_results'], package['wer_results']
            if main_proc and args.visdom and package['loss_results'] is not None and start_epoch > 0:
                # Add previous scores to visdom graph
                x_axis = epochs[0:start_epoch]
                y_axis = torch.stack(
                    (loss_results[0:start_epoch], wer_results[0:start_epoch], cer_results[0:start_epoch]), dim=1)
                viz_window = viz.line(
                    X=x_axis,
                    Y=y_axis,
                    opts=opts,
                )
            if main_proc and args.tensorboard and package['loss_results'] is not None and start_epoch > 0:
                # Previous scores to tensorboard logs
                for i in range(start_epoch):
                    values = {
                        'Avg Train Loss': loss_results[i],
                        'Avg WER': wer_results[i],
                        'Avg CER': cer_results[i]
                    }
                    tensorboard_writer.add_scalars(args.id, values, i + 1)
    else:
        with open(args.labels_path) as label_file:
            labels = str(''.join(json.load(label_file)))

        audio_conf = dict(sample_rate=args.sample_rate,
                          window_size=args.window_size,
                          window_stride=args.window_stride,
                          window=args.window,
                          noise_dir=args.noise_dir,
                          noise_prob=args.noise_prob,
                          noise_levels=(args.noise_min, args.noise_max))

        rnn_type = args.rnn_type.lower()
        assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
        model = DeepSpeech(rnn_hidden_size=args.hidden_size,
                           nb_layers=args.hidden_layers,
                           labels=labels,
                           rnn_type=supported_rnns[rnn_type],
                           audio_conf=audio_conf,
                           bidirectional=args.bidirectional)
        parameters = model.parameters()
        optimizer = torch.optim.SGD(parameters, lr=args.lr,
                                    momentum=args.momentum, nesterov=True)
    criterion = CTCLoss()
    decoder = GreedyDecoder(labels)
    train_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.train_manifest,
                                       labels=labels, normalize=True, augment=args.augment)
    test_dataset = SpectrogramDataset(audio_conf=audio_conf, manifest_filepath=args.val_manifest,
                                      labels=labels, normalize=True, augment=False)
    if not args.distributed:
        train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
    else:
        train_sampler = DistributedBucketingSampler(train_dataset, batch_size=args.batch_size,
                                                    num_replicas=args.world_size, rank=args.rank)
    train_loader = AudioDataLoader(train_dataset,
                                   num_workers=args.num_workers, batch_sampler=train_sampler)
    test_loader = AudioDataLoader(test_dataset, batch_size=args.batch_size,
                                  num_workers=args.num_workers)

    if (not args.no_shuffle and start_epoch != 0) or args.no_sorta_grad:
        print("Shuffling batches for the following epochs")
        train_sampler.shuffle(start_epoch)

    if args.cuda:
        model.cuda()
        if args.distributed:
            model = torch.nn.parallel.DistributedDataParallel(model,
                                                              device_ids=(int(args.gpu_rank),) if args.rank else None)

    print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    for epoch in range(start_epoch, args.epochs):
        model.train()
        end = time.time()
        start_epoch_time = time.time()
        for i, (data) in enumerate(train_loader, start=start_iter):
            if i == len(train_sampler):
                break
            inputs, targets, input_percentages, target_sizes = data
            input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            # measure data loading time
            data_time.update(time.time() - end)

            if args.cuda:
                inputs = inputs.cuda()

            out, output_sizes = model(inputs, input_sizes)
            out = out.transpose(0, 1)  # TxNxH

            loss = criterion(out, targets, output_sizes, target_sizes)
            loss = loss / inputs.size(0)  # average the loss by minibatch

            inf = float("inf")
            if args.distributed:
                loss_value = reduce_tensor(loss, args.world_size)[0]
            else:
                loss_value = loss.item()
            if loss_value == inf or loss_value == -inf:
                print("WARNING: received an inf loss, setting loss value to 0")
                loss_value = 0

            avg_loss += loss_value
            losses.update(loss_value, inputs.size(0))

            # compute gradient
            optimizer.zero_grad()
            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_norm)
            # SGD step
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if not args.silent:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          (epoch + 1), (i + 1), len(train_sampler),
                          batch_time=batch_time, data_time=data_time, loss=losses))
            if args.checkpoint_per_batch > 0 and i > 0 and (i + 1) % args.checkpoint_per_batch == 0 and main_proc:
                file_path = '%s/deepspeech_checkpoint_epoch_%d_iter_%d.pth' % (save_folder, epoch + 1, i + 1)
                print("Saving checkpoint model to %s" % file_path)
                torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, iteration=i,
                                                loss_results=loss_results,
                                                wer_results=wer_results, cer_results=cer_results,
                                                avg_loss=avg_loss),
                           file_path)
            del loss
            del out
        avg_loss /= len(train_sampler)

        epoch_time = time.time() - start_epoch_time
        print('Training Summary Epoch: [{0}]\t'
              'Time taken (s): {epoch_time:.0f}\t'
              'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=avg_loss))

        start_iter = 0  # Reset start iteration for next epoch
        total_cer, total_wer = 0, 0
        model.eval()
        with torch.no_grad():
            for i, (data) in tqdm(enumerate(test_loader), total=len(test_loader)):
                inputs, targets, input_percentages, target_sizes = data
                input_sizes = input_percentages.mul_(int(inputs.size(3))).int()

                # unflatten targets
                split_targets = []
                offset = 0
                for size in target_sizes:
                    split_targets.append(targets[offset:offset + size])
                    offset += size

                if args.cuda:
                    inputs = inputs.cuda()

                out, output_sizes = model(inputs, input_sizes)

                decoded_output, _ = decoder.decode(out.data, output_sizes)
                target_strings = decoder.convert_to_strings(split_targets)
                wer, cer = 0, 0
                for x in range(len(target_strings)):
                    transcript, reference = decoded_output[x][0], target_strings[x][0]
                    wer += decoder.wer(transcript, reference) / float(len(reference.split()))
                    cer += decoder.cer(transcript, reference) / float(len(reference))
                total_cer += cer
                total_wer += wer
                del out
            wer = total_wer / len(test_loader.dataset)
            cer = total_cer / len(test_loader.dataset)
            wer *= 100
            cer *= 100
            loss_results[epoch] = avg_loss
            wer_results[epoch] = wer
            cer_results[epoch] = cer
            print('Validation Summary Epoch: [{0}]\t'
                  'Average WER {wer:.3f}\t'
                  'Average CER {cer:.3f}\t'.format(epoch + 1, wer=wer, cer=cer))

        if args.visdom and main_proc:
            x_axis = epochs[0:epoch + 1]
            y_axis = torch.stack(
                (loss_results[0:epoch + 1], wer_results[0:epoch + 1], cer_results[0:epoch + 1]), dim=1)
            if viz_window is None:
                viz_window = viz.line(
                    X=x_axis,
                    Y=y_axis,
                    opts=opts,
                )
            else:
                viz.line(
                    X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1),  # Visdom fix
                    Y=y_axis,
                    win=viz_window,
                    update='replace',
                )
        if args.tensorboard and main_proc:
            values = {
                'Avg Train Loss': avg_loss,
                'Avg WER': wer,
                'Avg CER': cer
            }
            tensorboard_writer.add_scalars(args.id, values, epoch + 1)
            if args.log_params:
                for tag, value in model.named_parameters():
                    tag = tag.replace('.', '/')
                    tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
                    tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
        if args.checkpoint and main_proc:
            file_path = '%s/deepspeech_%d.pth' % (save_folder, epoch + 1)
            torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
                                            wer_results=wer_results, cer_results=cer_results),
                       file_path)
        # anneal lr
        optim_state = optimizer.state_dict()
        optim_state['param_groups'][0]['lr'] = optim_state['param_groups'][0]['lr'] / args.learning_anneal
        optimizer.load_state_dict(optim_state)
        print('Learning rate annealed to: {lr:.6f}'.format(lr=optim_state['param_groups'][0]['lr']))

        if (best_wer is None or best_wer > wer) and main_proc:
            print("Found better validated model, saving to %s" % args.model_path)
            torch.save(DeepSpeech.serialize(model, optimizer=optimizer, epoch=epoch, loss_results=loss_results,
                                            wer_results=wer_results, cer_results=cer_results),
                       args.model_path)
            best_wer = wer

        avg_loss = 0
        if not args.no_shuffle:
            print("Shuffling batches...")
            train_sampler.shuffle(epoch)
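reduce_tensor is used in the distributed branch of train_main but not defined in this fragment. A sketch consistent with that call site (sum-reduce the loss across workers, then average), assuming torch.distributed is already initialised:

import torch.distributed as dist

def reduce_tensor(tensor, world_size):
    # All-reduce so every process sees the same summed loss, then divide by
    # the number of workers to get the mean.
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= world_size
    return rt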