def evaluate(cfg: EvalConfig):
    device = torch.device("cuda" if cfg.model.cuda else "cpu")

    model = load_model(device=device, model_path=cfg.model.model_path)

    decoder = load_decoder(labels=model.labels, cfg=cfg.lm)
    target_decoder = GreedyDecoder(labels=model.labels,
                                   blank_index=model.labels.index('_'))
    test_dataset = SpectrogramDataset(audio_conf=model.spect_cfg,
                                      input_path=hydra.utils.to_absolute_path(cfg.test_path),
                                      labels=model.labels,
                                      normalize=True)
    test_loader = AudioDataLoader(test_dataset,
                                  batch_size=cfg.batch_size,
                                  num_workers=cfg.num_workers)
    wer, cer = run_evaluation_print(test_loader=test_loader,
                                    device=device,
                                    model=model,
                                    decoder=decoder,
                                    target_decoder=target_decoder,
                                    precision=cfg.model.precision)

    print('Test Summary \t'
          'Average WER {wer:.3f}\t'
          'Average CER {cer:.3f}\t'.format(wer=wer, cer=cer))
def evaluate(cfg: EvalConfig):
    device = torch.device("cuda" if cfg.model.cuda else "cpu")

    model = load_model(device=device,
                       model_path=cfg.model.model_path,
                       use_half=cfg.model.use_half)
    decoder = load_decoder(labels=model.labels, cfg=cfg.lm)
    target_decoder = GreedyDecoder(model.labels,
                                   blank_index=model.labels.index('_'))
    test_dataset = SpectrogramDataset(
        audio_conf=model.audio_conf,
        manifest_filepath=hydra.utils.to_absolute_path(cfg.test_manifest),
        labels=model.labels,
        normalize=True)
    test_loader = AudioDataLoader(test_dataset,
                                  batch_size=cfg.batch_size,
                                  num_workers=cfg.num_workers)
    wer, cer, output_data = run_evaluation(test_loader=test_loader,
                                           device=device,
                                           model=model,
                                           decoder=decoder,
                                           target_decoder=target_decoder,
                                           save_output=cfg.save_output,
                                           verbose=cfg.verbose,
                                           use_half=cfg.model.use_half)

    print('Test Summary \t'
          'Average WER {wer:.3f}\t'
          'Average CER {cer:.3f}\t'.format(wer=wer, cer=cer))
    if cfg.save_output:
        torch.save(output_data, hydra.utils.to_absolute_path(cfg.save_output))
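# The "Average WER/CER" figures printed above are word- and character-level
# edit distances normalised by the reference length. A minimal, self-contained
# sketch of that calculation (an illustration only, not the project's
# run_evaluation/decoder implementation):
def edit_distance(ref, hyp):
    # Classic rolling-row Levenshtein distance between two sequences.
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, start=1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, start=1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]


def wer_cer(reference: str, hypothesis: str):
    # WER over words, CER over characters, both normalised by reference length.
    wer = edit_distance(reference.split(), hypothesis.split()) / max(1, len(reference.split()))
    cer = edit_distance(reference, hypothesis) / max(1, len(reference))
    return wer, cer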
def __init__(self, cfg):
    self.cfg = cfg
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model = load_model(self.device,
                            hydra.utils.to_absolute_path(self.cfg.model_path))
    self.ckpt = torch.load(hydra.utils.to_absolute_path(self.cfg.model_path),
                           map_location=self.device)
    self.labels = self.ckpt['hyper_parameters']['labels']

    self.decoder = BeamCTCDecoder(labels=self.labels,
                                  lm_path=hydra.utils.to_absolute_path(self.cfg.lm_path),
                                  beam_width=self.cfg.beam_width,
                                  num_processes=self.cfg.num_workers,
                                  blank_index=self.labels.index('_'))
    self.target_decoder = GreedyDecoder(labels=self.labels,
                                        blank_index=self.labels.index('_'))

    test_dataset = SpectrogramDataset(
        audio_conf=self.cfg.spect_cfg,
        input_path=hydra.utils.to_absolute_path(cfg.test_path),
        labels=self.labels,
        normalize=True)
    self.test_loader = AudioDataLoader(test_dataset,
                                       batch_size=self.cfg.batch_size,
                                       num_workers=self.cfg.num_workers)
def train_dataloader(self):
    train_dataset = self._create_dataset(self.train_path)
    if self.is_distributed:
        train_sampler = DSElasticDistributedSampler(
            dataset=train_dataset,
            batch_size=self.data_cfg.batch_size)
    else:
        train_sampler = DSRandomSampler(
            dataset=train_dataset,
            batch_size=self.data_cfg.batch_size)
    train_loader = AudioDataLoader(dataset=train_dataset,
                                   num_workers=self.data_cfg.num_workers,
                                   batch_sampler=train_sampler)
    return train_loader
def val_dataloader(self):
    val_dataset = SpectrogramDataset(
        manifest_filepath=self.val_path,
        labels=self.labels,
        validation=True,
    )
    val_loader = AudioDataLoader(
        dataset=val_dataset,
        num_workers=self.params.num_workers,
        batch_size=self.params.batch_size,
        shuffle=False,
        pin_memory=True
    )
    return val_loader
def train_dataloader(self):
    train_dataset = SpectrogramDataset(
        manifest_filepath=self.train_path,
        labels=self.labels,
        **vars(self.params)
    )
    train_loader = AudioDataLoader(
        dataset=train_dataset,
        num_workers=self.params.num_workers,
        batch_size=self.params.batch_size,
        shuffle=True,
        pin_memory=True
    )
    return train_loader
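# AudioDataLoader differs from a plain DataLoader mainly in its collate
# function: variable-length spectrograms are zero-padded to the longest
# utterance in the batch, and each batch is returned as
# (inputs, targets, input_percentages, target_sizes) -- which is where the
# input_percentages.mul_(inputs.size(3)) pattern in the training loops below
# comes from. A simplified sketch of that idea (illustrative only, not the
# library's collate_fn), assuming each sample is a
# (spectrogram [freq x time], transcript_indices) pair:
import torch


def pad_collate(batch):
    batch = sorted(batch, key=lambda s: s[0].size(1), reverse=True)
    freq = batch[0][0].size(0)
    max_len = batch[0][0].size(1)
    inputs = torch.zeros(len(batch), 1, freq, max_len)
    input_percentages = torch.zeros(len(batch))
    target_sizes = torch.zeros(len(batch), dtype=torch.long)
    targets = []
    for i, (spect, transcript) in enumerate(batch):
        inputs[i, 0, :, :spect.size(1)] = spect         # zero-pad to max_len
        input_percentages[i] = spect.size(1) / max_len  # real length as a fraction of max_len
        target_sizes[i] = len(transcript)
        targets.extend(transcript)                      # flat 1-D target vector
    return inputs, torch.tensor(targets, dtype=torch.long), input_percentages, target_sizes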
def eval_func(model, iterations=None, device=device):
    test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                      manifest_filepath=args.test_manifest,
                                      labels=model.labels,
                                      normalize=True)
    if iterations is not None:
        test_dataset.size = iterations
    test_loader = AudioDataLoader(test_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers)
    wer, cer, output_data = run_evaluation(test_loader=test_loader,
                                           device=device,
                                           model=model,
                                           decoder=decoder,
                                           target_decoder=target_decoder,
                                           save_output=False,
                                           verbose=True,
                                           use_half=False)
    return wer, cer, output_data
                          alpha=args.alpha,
                          beta=args.beta,
                          cutoff_top_n=args.cutoff_top_n,
                          cutoff_prob=args.cutoff_prob,
                          beam_width=args.beam_width,
                          lm_workers=args.lm_workers)
target_decoder = GreedyDecoder(labels)
test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                  manifest_filepath=args.test_manifest,
                                  labels=labels,
                                  normalize=True)
test_loader = AudioDataLoader(test_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers)
wer, cer, output_data = evaluate(test_loader=test_loader,
                                 device=device,
                                 model=model,
                                 decoder=decoder,
                                 target_decoder=target_decoder,
                                 save_output=args.save_output,
                                 verbose=args.verbose,
                                 half=args.half)

print('Test Summary \t'
      'Average WER {wer:.3f}\t'
      'Average CER {cer:.3f}\t'.format(wer=wer, cer=cer))
def train(cfg):
    config = dict(
        epochs=cfg.training.epochs,
        batch_size=cfg.data.batch_size,
        learning_rate=cfg.optim.learning_rate,
        rnn_type=cfg.model.rnn_type,
        hidden_size=cfg.model.hidden_size,
        hidden_layers=cfg.model.hidden_layers,
        optimizer=cfg.optim,
        # update_hessian=cfg.optim.update_each
    )
    wandb.init(project="adahessian-deepspeech", config=config)

    # Set seeds for determinism
    torch.manual_seed(cfg.training.seed)
    torch.cuda.manual_seed_all(cfg.training.seed)
    np.random.seed(cfg.training.seed)
    random.seed(cfg.training.seed)
    torch.backends.cudnn.enabled = False

    main_proc = True
    device = torch.device("cpu" if cfg.training.no_cuda else "cuda")
    is_distributed = os.environ.get("LOCAL_RANK")  # If local rank exists, distributed env
    if is_distributed:
        # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops
        # because NCCL uses a spin-lock on the device. Set this env var
        # to enable a watchdog thread that will destroy stale NCCL communicators
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
        device_id = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(device_id)
        print(f"Setting CUDA Device to {device_id}")

        dist.init_process_group(backend=cfg.training.dist_backend.value)
        main_proc = device_id == 0  # Main process handles saving of models and reporting

    if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:
        checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)
    elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:
        checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)
    else:
        raise ValueError("Checkpoint Config has not been specified correctly.")

    if main_proc and cfg.visualization.visdom:
        visdom_logger = VisdomLogger(id=cfg.visualization.id,
                                     num_epochs=cfg.training.epochs)
    if main_proc and cfg.visualization.tensorboard:
        tensorboard_logger = TensorBoardLogger(
            id=cfg.visualization.id,
            log_dir=to_absolute_path(cfg.visualization.log_dir),
            log_params=cfg.visualization.log_params)

    if cfg.checkpointing.load_auto_checkpoint:
        latest_checkpoint = checkpoint_handler.find_latest_checkpoint()
        if latest_checkpoint:
            cfg.checkpointing.continue_from = latest_checkpoint

    if cfg.checkpointing.continue_from:  # Starting from previous model
        state = TrainingState.load_state(
            state_path=to_absolute_path(cfg.checkpointing.continue_from))
        model = state.model
        if cfg.training.finetune:
            state.init_finetune_states(cfg.training.epochs)

        if main_proc and cfg.visualization.visdom:  # Add previous scores to visdom graph
            visdom_logger.load_previous_values(state.epoch, state.results)
        if main_proc and cfg.visualization.tensorboard:  # Previous scores to tensorboard logs
            tensorboard_logger.load_previous_values(state.epoch, state.results)
    else:  # Initialise new model training
        with open(to_absolute_path(cfg.data.labels_path)) as label_file:
            labels = json.load(label_file)

        if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:
            model = DeepSpeech(
                rnn_hidden_size=cfg.model.hidden_size,
                nb_layers=cfg.model.hidden_layers,
                labels=labels,
                rnn_type=supported_rnns[cfg.model.rnn_type.value],
                audio_conf=cfg.data.spect,
                bidirectional=True)
        elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:
            model = DeepSpeech(
                rnn_hidden_size=cfg.model.hidden_size,
                nb_layers=cfg.model.hidden_layers,
                labels=labels,
                rnn_type=supported_rnns[cfg.model.rnn_type.value],
                audio_conf=cfg.data.spect,
                bidirectional=False,
                context=cfg.model.lookahead_context)
        else:
            raise ValueError("Model Config has not been specified correctly.")

        state = TrainingState(model=model)

    state.init_results_tracking(epochs=cfg.training.epochs)

    # Data setup
    evaluation_decoder = GreedyDecoder(model.labels)  # Decoder used for validation
    train_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                       manifest_filepath=to_absolute_path(cfg.data.train_manifest),
                                       labels=model.labels,
                                       normalize=True,
                                       augmentation_conf=cfg.data.augmentation)
    test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                      manifest_filepath=to_absolute_path(cfg.data.val_manifest),
                                      labels=model.labels,
                                      normalize=True)
    if not is_distributed:
        train_sampler = DSRandomSampler(dataset=train_dataset,
                                        batch_size=cfg.data.batch_size,
                                        start_index=state.training_step)
    else:
        train_sampler = DSElasticDistributedSampler(
            dataset=train_dataset,
            batch_size=cfg.data.batch_size,
            start_index=state.training_step)
    train_loader = AudioDataLoader(dataset=train_dataset,
                                   num_workers=cfg.data.num_workers,
                                   batch_sampler=train_sampler)
    test_loader = AudioDataLoader(dataset=test_dataset,
                                  num_workers=cfg.data.num_workers,
                                  batch_size=cfg.data.batch_size)

    model = model.to(device)
    parameters = model.parameters()
    if OmegaConf.get_type(cfg.optim) is SGDConfig:
        optimizer = torch.optim.SGD(parameters,
                                    lr=cfg.optim.learning_rate,
                                    momentum=cfg.optim.momentum,
                                    nesterov=True,
                                    weight_decay=cfg.optim.weight_decay)
    elif OmegaConf.get_type(cfg.optim) is AdamConfig:
        optimizer = torch.optim.AdamW(parameters,
                                      lr=cfg.optim.learning_rate,
                                      betas=cfg.optim.betas,
                                      eps=cfg.optim.eps,
                                      weight_decay=cfg.optim.weight_decay)
    elif OmegaConf.get_type(cfg.optim) is AdaHessianConfig:
        optimizer = AdaHessian(
            parameters,
            lr=cfg.optim.learning_rate,
            betas=cfg.optim.betas,
            eps=cfg.optim.eps,
            weight_decay=cfg.optim.weight_decay,
            update_each=cfg.optim.update_each,
            # average_conv_kernel=cfg.optim.average_conv_kernel,
            # hessian_power=cfg.optim.hessian_power
        )
        torch.backends.cudnn.enabled = False
    else:
        raise ValueError("Optimizer has not been specified correctly.")

    if OmegaConf.get_type(cfg.optim) is not AdaHessianConfig:
        model, optimizer = amp.initialize(model, optimizer,
                                          enabled=not cfg.training.no_cuda,
                                          opt_level=cfg.apex.opt_level,
                                          loss_scale=cfg.apex.loss_scale)
    if state.optim_state is not None:
        optimizer.load_state_dict(state.optim_state)
    if state.amp_state is not None:
        amp.load_state_dict(state.amp_state)

    # Track states for optimizer/amp
    state.track_optim_state(optimizer)
    if not cfg.training.no_cuda and OmegaConf.get_type(cfg.optim) is not AdaHessianConfig:
        state.track_amp_state(amp)

    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device_id])
    print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    criterion = CTCLoss()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    for epoch in range(state.epoch, cfg.training.epochs):
        model.train()
        end = time.time()
        start_epoch_time = time.time()
        state.set_epoch(epoch=epoch)
        train_sampler.set_epoch(epoch=epoch)
        train_sampler.reset_training_step(training_step=state.training_step)
        for i, (data) in enumerate(train_loader, start=state.training_step):
            state.set_training_step(training_step=i)
            inputs, targets, input_percentages, target_sizes = data
            input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            # measure data loading time
            data_time.update(time.time() - end)
            inputs = inputs.to(device)

            out, output_sizes = model(inputs, input_sizes)
            out = out.transpose(0, 1)  # TxNxH

            float_out = out.float()  # ensure float32 for loss
            loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)
            loss = loss / inputs.size(0)  # average the loss by minibatch
            loss_value = loss.item()

            # Check to ensure valid loss was calculated
            valid_loss, error = check_loss(loss, loss_value)
            if valid_loss:
                optimizer.zero_grad()

                # compute gradient
                if OmegaConf.get_type(cfg.optim) is AdaHessianConfig:
                    loss.backward(create_graph=True)
                else:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
                                                   cfg.optim.max_norm)
                optimizer.step()
            else:
                print(error)
                print('Skipping grad update')
                loss_value = 0

            state.avg_loss += loss_value
            losses.update(loss_value, inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      (epoch + 1), (i + 1), len(train_loader),
                      batch_time=batch_time, data_time=data_time, loss=losses))

            if main_proc and cfg.checkpointing.checkpoint_per_iteration:
                checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)
            del loss, out, float_out

        state.avg_loss /= len(train_dataset)

        epoch_time = time.time() - start_epoch_time
        print('Training Summary Epoch: [{0}]\t'
              'Time taken (s): {epoch_time:.0f}\t'
              'Average Loss {loss:.3f}\t'.format(epoch + 1,
                                                 epoch_time=epoch_time,
                                                 loss=state.avg_loss))

        with torch.no_grad():
            wer, cer, output_data = run_evaluation(
                test_loader=test_loader,
                device=device,
                model=model,
                decoder=evaluation_decoder,
                target_decoder=evaluation_decoder)

        state.add_results(epoch=epoch,
                          loss_result=state.avg_loss,
                          wer_result=wer,
                          cer_result=cer)

        print('Validation Summary Epoch: [{0}]\t'
              'Average WER {wer:.3f}\t'
              'Average CER {cer:.3f}\t'.format(epoch + 1, wer=wer, cer=cer))

        if main_proc and cfg.visualization.visdom:
            visdom_logger.update(epoch, state.result_state)
        if main_proc and cfg.visualization.tensorboard:
            tensorboard_logger.update(epoch, state.result_state, model.named_parameters())
        if main_proc and cfg.visualization.wandb:
            wandb.log({
                'epoch': epoch,
                'Average Loss': state.avg_loss,
                'Average WER': wer,
                'Average CER': cer
            })

        if main_proc and cfg.checkpointing.checkpoint:  # Save epoch checkpoint
            checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)

        # anneal lr
        for g in optimizer.param_groups:
            g['lr'] = g['lr'] / cfg.optim.learning_anneal
        print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
        wandb.log({"lr": g['lr']})

        if main_proc and (state.best_wer is None or state.best_wer > wer):
            checkpoint_handler.save_best_model(epoch=epoch, state=state)
            state.set_best_wer(wer)

        state.reset_avg_loss()
        state.reset_training_step()  # Reset training step for next epoch
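# The loss calls above follow the CTC convention: log-probabilities shaped
# (T, N, C) (time, batch, classes), a flat 1-D target vector, and per-sample
# output/target lengths -- which is why the model output is transposed to
# TxNxH before criterion(...). A minimal, self-contained shape check using
# torch.nn.CTCLoss (illustrative; the training code above uses its own
# CTCLoss wrapper and real model outputs):
import torch
import torch.nn as nn

T, N, C = 50, 4, 29                                        # time steps, batch size, label count (blank at index 0)
log_probs = torch.randn(T, N, C).log_softmax(dim=-1)       # stand-in for the network output
target_sizes = torch.tensor([12, 9, 15, 7])                # per-utterance transcript lengths
targets = torch.randint(1, C, (int(target_sizes.sum()),))  # flat targets, no blanks
output_sizes = torch.full((N,), T, dtype=torch.long)       # per-utterance output lengths

ctc = nn.CTCLoss(blank=0)
loss = ctc(log_probs, targets, output_sizes, target_sizes)  # scalar loss
print(loss.item())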
def train(cfg):
    # Set seeds for determinism
    torch.manual_seed(cfg.training.seed)
    torch.cuda.manual_seed_all(cfg.training.seed)
    np.random.seed(cfg.training.seed)
    random.seed(cfg.training.seed)

    main_proc = True
    device = torch.device("cpu" if cfg.training.no_cuda else "cuda")
    is_distributed = os.environ.get("LOCAL_RANK")  # If local rank exists, distributed env
    if is_distributed:
        # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops
        # because NCCL uses a spin-lock on the device. Set this env var
        # to enable a watchdog thread that will destroy stale NCCL communicators
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
        device_id = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(device_id)
        print(f"Setting CUDA Device to {device_id}")

        dist.init_process_group(backend=cfg.training.dist_backend.value)
        main_proc = device_id == 0  # Main process handles saving of models and reporting

    if OmegaConf.get_type(cfg.checkpointing) == FileCheckpointConfig:
        checkpoint_handler = FileCheckpointHandler(cfg=cfg.checkpointing)
    elif OmegaConf.get_type(cfg.checkpointing) == GCSCheckpointConfig:
        checkpoint_handler = GCSCheckpointHandler(cfg=cfg.checkpointing)
    else:
        raise ValueError("Checkpoint Config has not been specified correctly.")

    if main_proc and cfg.visualization.visdom:
        visdom_logger = VisdomLogger(id=cfg.visualization.id,
                                     num_epochs=cfg.training.epochs)
    if main_proc and cfg.visualization.tensorboard:
        tensorboard_logger = TensorBoardLogger(
            id=cfg.visualization.id,
            log_dir=to_absolute_path(cfg.visualization.log_dir),
            log_params=cfg.visualization.log_params)

    if cfg.checkpointing.load_auto_checkpoint:
        latest_checkpoint = checkpoint_handler.find_latest_checkpoint()
        if latest_checkpoint:
            cfg.checkpointing.continue_from = latest_checkpoint

    if cfg.checkpointing.continue_from:  # Starting from previous model
        state = TrainingState.load_state(
            state_path=to_absolute_path(cfg.checkpointing.continue_from))
        model = state.model
        if cfg.training.finetune:
            state.init_finetune_states(cfg.training.epochs)

        if main_proc and cfg.visualization.visdom:  # Add previous scores to visdom graph
            visdom_logger.load_previous_values(state.epoch, state.results)
        if main_proc and cfg.visualization.tensorboard:  # Previous scores to tensorboard logs
            tensorboard_logger.load_previous_values(state.epoch, state.results)
    else:  # Initialise new model training
        with open(to_absolute_path(cfg.data.labels_path)) as label_file:
            labels = json.load(label_file)

        # Model configuration, defined in train_config.py at line 51:
        # @dataclass
        # class BiDirectionalConfig:
        #     rnn_type: RNNType = RNNType.lstm  # Type of RNN to use in model
        #     hidden_size: int = 1024  # Hidden size of RNN Layer
        #     hidden_layers: int = 7  # Number of RNN layers
        if OmegaConf.get_type(cfg.model) is BiDirectionalConfig:
            model = DeepSpeech(
                rnn_hidden_size=cfg.model.hidden_size,
                nb_layers=cfg.model.hidden_layers,
                labels=labels,
                rnn_type=supported_rnns[cfg.model.rnn_type.value],
                audio_conf=cfg.data.spect,
                bidirectional=True)
        elif OmegaConf.get_type(cfg.model) is UniDirectionalConfig:
            model = DeepSpeech(
                rnn_hidden_size=cfg.model.hidden_size,
                nb_layers=cfg.model.hidden_layers,
                labels=labels,
                rnn_type=supported_rnns[cfg.model.rnn_type.value],
                audio_conf=cfg.data.spect,
                bidirectional=False,
                context=cfg.model.lookahead_context)
        else:
            raise ValueError("Model Config has not been specified correctly.")

        state = TrainingState(model=model)

    state.init_results_tracking(epochs=cfg.training.epochs)

    # Data setup
    evaluation_decoder = GreedyDecoder(model.labels)  # Decoder used for validation
    train_dataset = SpectrogramDataset(
        audio_conf=model.audio_conf,
        manifest_filepath=to_absolute_path(cfg.data.train_manifest),
        labels=model.labels,
        normalize=True,
        augmentation_conf=cfg.data.augmentation
    )  # spect config; ids = [[row 1], [row 2], ...]; labels_ is a dict
    test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                      manifest_filepath=to_absolute_path(cfg.data.val_manifest),
                                      labels=model.labels,
                                      normalize=True)
    if not is_distributed:
        train_sampler = DSRandomSampler(
            dataset=train_dataset,
            batch_size=cfg.data.batch_size,
            start_index=state.training_step
        )  # DSRandomSampler picks minibatches at random and shuffles the samples within each minibatch
    else:
        train_sampler = DSElasticDistributedSampler(
            dataset=train_dataset,
            batch_size=cfg.data.batch_size,
            start_index=state.training_step)
    train_loader = AudioDataLoader(
        dataset=train_dataset,
        num_workers=cfg.data.num_workers,
        batch_sampler=train_sampler
    )  # AudioDataLoader's collate_fn processes each selected minibatch; it ends up yielding
       # 835 batches (for FPT, VIVOS), each batch holding batch_size samples
    test_loader = AudioDataLoader(dataset=test_dataset,
                                  num_workers=cfg.data.num_workers,
                                  batch_size=cfg.data.batch_size)

    model = model.to(device)
    parameters = model.parameters()
    if OmegaConf.get_type(cfg.optim) is SGDConfig:  # the default, line 8 of train_config
        optimizer = torch.optim.SGD(parameters,
                                    lr=cfg.optim.learning_rate,
                                    momentum=cfg.optim.momentum,
                                    nesterov=True,
                                    weight_decay=cfg.optim.weight_decay)
    elif OmegaConf.get_type(cfg.optim) is AdamConfig:
        optimizer = torch.optim.AdamW(parameters,
                                      lr=cfg.optim.learning_rate,
                                      betas=cfg.optim.betas,
                                      eps=cfg.optim.eps,
                                      weight_decay=cfg.optim.weight_decay)
    else:
        raise ValueError("Optimizer has not been specified correctly.")

    model, optimizer = amp.initialize(model, optimizer,
                                      enabled=not cfg.training.no_cuda,
                                      opt_level=cfg.apex.opt_level,
                                      loss_scale=cfg.apex.loss_scale)
    if state.optim_state is not None:
        optimizer.load_state_dict(state.optim_state)
    if state.amp_state is not None:
        amp.load_state_dict(state.amp_state)

    # Track states for optimizer/amp
    state.track_optim_state(optimizer)
    if not cfg.training.no_cuda:
        state.track_amp_state(amp)

    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device_id])
    # print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    criterion = CTCLoss()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    for epoch in range(state.epoch, cfg.training.epochs):  # one epoch walks the entire training csv
        model.train()
        end = time.time()
        start_epoch_time = time.time()
        state.set_epoch(epoch=epoch)
        train_sampler.set_epoch(epoch=epoch)
        train_sampler.reset_training_step(training_step=state.training_step)
        for i, (data) in enumerate(train_loader, start=state.training_step):  # iterate over each minibatch (e.g. 32 samples)
            state.set_training_step(training_step=i)
            # inputs[x][0] holds the x-th spectrogram in the batch; input_percentages is the ratio of
            # each utterance's length to the longest in the minibatch; targets is a flat array of label indices
            inputs, targets, input_percentages, target_sizes = data
            # multiply the ratio by the max length to recover each utterance's true length,
            # e.g. tensor([699, 682, 656, 560, 553, 517, 514, 502, 464, 458, 423, 412, 406, 349, ...])
            input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            # measure data loading time
            data_time.update(time.time() - end)
            inputs = inputs.to(device)

            # feed the batch through the deep model together with each utterance's real size
            # (number of spectrogram time steps)
            out, output_sizes = model(inputs, input_sizes)
            # out is 3-D; output_sizes is 1-D and holds the model's predicted output lengths
            out = out.transpose(0, 1)  # TxNxH; dims 0 and 1 are swapped, e.g. out becomes (190 x 3 x 93)

            float_out = out.float()  # ensure float32 for loss
            loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)
            loss = loss / inputs.size(0)  # average the loss by minibatch: total loss divided by the number of spectrograms in the batch
            loss_value = loss.item()

            # Check to ensure valid loss was calculated
            valid_loss, error = check_loss(loss, loss_value)
            if valid_loss:
                optimizer.zero_grad()

                # compute gradient; the optimizer step then updates the weights
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
                                               cfg.optim.max_norm)
                optimizer.step()
            else:
                print(error)
                print('Skipping grad update')
                loss_value = 0

            state.avg_loss += loss_value
            losses.update(loss_value, inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      (epoch + 1), (i + 1), len(train_loader),
                      batch_time=batch_time, data_time=data_time, loss=losses))

            if main_proc and cfg.checkpointing.checkpoint_per_iteration:
                checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)
            del loss, out, float_out

        state.avg_loss /= len(train_dataset)

        epoch_time = time.time() - start_epoch_time
        print('Training Summary Epoch: [{0}]\t'
              'Time taken (s): {epoch_time:.0f}\t'
              'Average Loss {loss:.3f}\t'.format(epoch + 1,
                                                 epoch_time=epoch_time,
                                                 loss=state.avg_loss))
        mylogg2er.info('Training Summary Epoch: [{0}]\t'
                       'Time taken (s): {epoch_time:.0f}\t'
                       'Average Loss {loss:.3f}\n'.format(
                           epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))
        file_object = open('/root/epoch.log', 'a')
        file_object.write('Training Summary Epoch: [{0}]\t'
                          'Time taken (s): {epoch_time:.0f}\t'
                          'Average Loss {loss:.3f}\n'.format(
                              epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))
        file_object.close()

        with torch.no_grad():
            wer, cer, output_data, wer2, cer2 = run_evaluation(
                test_loader=test_loader,
                device=device,
                model=model,
                decoder=evaluation_decoder,
                target_decoder=evaluation_decoder)

        state.add_results(epoch=epoch,
                          loss_result=state.avg_loss,
                          wer_result=wer,
                          cer_result=cer)

        print('Validation Summary Epoch: [{0}]\t'
              'Average WER {wer:.3f}\t'
              'Average CER {cer:.3f}\t'.format(epoch + 1, wer=wer, cer=cer))
        # mylogg2er.info('Validation Summary Epoch: [{0}]\t'
        #                'Average WER {wer:.3f}\t'
        #                'Average CER {cer:.3f}\t'.format(epoch + 1, wer=wer, cer=cer))
        file_object = open('/root/epoch.log', 'a')
        file_object.write('Validation Summary Epoch: [{0}]\t'
                          'Average WER {wer:.3f}\t'
                          'Average CER {cer:.3f}\t'.format(epoch + 1, wer=wer, cer=cer))
        file_object.write('Validation Summary Epoch: [{0}]\t'
                          'Average WER2 {wer:.3f}\t'
                          'Average CER2 {cer:.3f}\n'.format(epoch + 1, wer=wer2, cer=cer2))
        file_object.close()

        if main_proc and cfg.visualization.visdom:
            visdom_logger.update(epoch, state.result_state)
        if main_proc and cfg.visualization.tensorboard:
            tensorboard_logger.update(epoch, state.result_state,
                                      model.named_parameters())
        if main_proc and cfg.checkpointing.checkpoint:  # Save epoch checkpoint
            checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)

        # anneal lr
        for g in optimizer.param_groups:
            g['lr'] = g['lr'] / cfg.optim.learning_anneal
        print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))
        file_object = open('/root/epoch.log', 'a')
        file_object.write('Learning rate annealed to: {lr:.6f}\n'.format(lr=g['lr']))
        file_object.close()

        try:
            # print('Training Summary Epoch: [{0}]\t'
            #       'Time taken (s): {epoch_time:.0f}\t'
            #       'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=state.avg_loss))
            note = ("Changed train_config parameters: type: rnn.gru, epochs: int = 50, "
                    "batch_size: int = 30, hidden_size: int = 1600, hidden_layers: int = 7, "
                    "train_manifest files: vinfptunk_train.csv, vinfptunk_dev.csv")
            sendReport(epoch + 1,
                       '{:.3f}'.format(epoch_time),
                       '{:.3f}'.format(state.avg_loss),
                       '{:.3f}'.format(wer),
                       '{:.3f}'.format(cer),
                       "{:.6f}".format(g['lr']),
                       note)
        except Exception as esss:
            print('Error :', esss)

        if main_proc and (state.best_wer is None or state.best_wer > wer):
            checkpoint_handler.save_best_model(epoch=epoch, state=state)
            state.set_best_wer(wer)

        state.reset_avg_loss()
        state.reset_training_step()  # Reset training step for next epoch
def normalize_tensor(x):
    return x / x.sum()


if __name__ == '__main__':
    LABELS = ["_", "'", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
              "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
              "W", "X", "Y", "Z", " "]

    path_input = '/home/coml/Documents/Victoria/noise_classifier/deepspeech_model/data/CommonVoice_dataset/train'
    test_dataset = SpectrogramDataset(audio_conf=SpectConfig(),
                                      input_path=path_input,
                                      labels=LABELS)
    test_loader = AudioDataLoader(dataset=test_dataset)
    model = DeepSpeech(labels=LABELS,
                       precision=32,
                       spect_cfg=SpectConfig(),
                       optim_cfg=AdamConfig(),
                       model_cfg=BiDirectionalConfig())
    # args: 'labels', 'model_cfg', 'precision', 'optim_cfg', and 'spect_cfg'
    # im = test_loader[0]

    NUM_CLASSES = 29  # Corresponds to the length of the labels
    # layer = nn.Linear()

    for i, data in enumerate(test_loader, 0):
        # print('DATA \n', data[1], '\n', data[2], '\n', data[3], '\n')
        print('\n Sample {}: input length {}'.format(i, data[3]))
        out, length = model.forward(data[0], data[3])
        print(' Outputs shapes: {} {}'.format(out.shape, length))
        print('Final output', out)
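# The GreedyDecoder used as target_decoder throughout these snippets maps the
# network's per-timestep class scores back to text by taking the argmax path,
# collapsing repeated symbols, and dropping the CTC blank ('_', index 0 in the
# LABELS list above). A minimal sketch of that rule for a single utterance
# (illustrative only, not the library's GreedyDecoder class):
import torch


def greedy_ctc_decode(probs: torch.Tensor,
                      labels: str = "_'ABCDEFGHIJKLMNOPQRSTUVWXYZ ",
                      blank: int = 0) -> str:
    # probs: (T, num_classes) scores for one utterance
    best_path = probs.argmax(dim=-1).tolist()
    decoded = []
    prev = blank
    for idx in best_path:
        if idx != blank and idx != prev:  # collapse repeats, skip blanks
            decoded.append(labels[idx])
        prev = idx
    return ''.join(decoded)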
def representations_extractor(
        layer: str,
        # dataset_name: str,
        # destination_path: str,
        # checkpoint: str,
        device: str,
        cfg: DeepSpeechConfig):
    seed_everything(cfg.seed)

    # Load the Labels (USELESS UP TO NOW)
    with open(to_absolute_path(cfg.data.labels_path)) as label_file:
        labels = json.load(label_file)
    print('Loaded Labels', labels)

    # Load the checkpoint
    if cfg.trainer.checkpoint_callback:
        if OmegaConf.get_type(cfg.checkpoint) is GCSCheckpointConfig:
            checkpoint_callback = GCSCheckpointHandler(cfg=cfg.checkpoint)
        else:
            checkpoint_callback = FileCheckpointHandler(cfg=cfg.checkpoint)
        if cfg.load_auto_checkpoint:
            resume_from_checkpoint = checkpoint_callback.find_latest_checkpoint()
            if resume_from_checkpoint:
                cfg.trainer.resume_from_checkpoint = resume_from_checkpoint

    # Define the dataloader
    print('Load the Dataset ...')
    dataset = SpectrogramDataset(audio_conf=cfg.data.spect,
                                 input_path=to_absolute_path(cfg.data.train_path),
                                 labels=labels,
                                 normalize=True,
                                 aug_cfg=cfg.data.augmentation)
    is_distributed = False
    if is_distributed:
        sampler = DSElasticDistributedSampler(dataset=dataset, batch_size=1)
    else:
        sampler = DSRandomSampler(dataset=dataset, batch_size=1)
    loader = AudioDataLoader(dataset=dataset,
                             shuffle=False,
                             num_workers=cfg.data.num_workers,
                             batch_sampler=sampler)

    # Load the model
    model = DeepSpeech(labels=labels,
                       model_cfg=cfg.model,
                       optim_cfg=cfg.optim,
                       precision=cfg.trainer.precision,
                       spect_cfg=cfg.data.spect)
    # model.load_state_dict(torch.load(checkpoint))

    # Compute intermediate representations
    for i, batch in enumerate(loader, 0):
        with torch.no_grad():
            inputs, targets, input_percentages, target_sizes = batch
            input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            # device = torch.cuda.device(device)
            # inputs = inputs.to(device)
            out, output_sizes = model.intermediate_forward(inputs, input_sizes, layer)
            if i == 0:
                print('Layer {} output shape: {}'.format(layer, out.shape))

            # Reshape the outputs
            new_outputs = reshape_outputs(out, layer)

            # Save the representations
            dataset_name = 'freesound_train_curated'
            destination_path = '/scratch2/vbrami/deepspeech_extractions/freesound_outputs'
            save_outputs(new_outputs, layer, i, destination_path, dataset_name)
    return None
def train(cfg):
    # Initialise the txt file used to store the results
    with open("/home/jhjeong/jiho_deep/deepspeech.pytorch/jiho_result/result.txt", "w") as ff:
        ff.write("Training started! \n")

    # Set seeds for determinism
    torch.manual_seed(cfg.training.seed)
    torch.cuda.manual_seed_all(cfg.training.seed)
    np.random.seed(cfg.training.seed)
    random.seed(cfg.training.seed)

    main_proc = True
    device = torch.device("cpu" if cfg.training.no_cuda else "cuda")
    is_distributed = os.environ.get("LOCAL_RANK")  # If local rank exists, distributed env
    if is_distributed:
        # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops
        # because NCCL uses a spin-lock on the device. Set this env var
        # to enable a watchdog thread that will destroy stale NCCL communicators
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
        device_id = int(os.environ["LOCAL_RANK"])
        torch.cuda.set_device(device_id)
        print(f"Setting CUDA Device to {device_id}")

        dist.init_process_group(backend=cfg.training.dist_backend)
        main_proc = device_id == 0  # Main process handles saving of models and reporting

    checkpoint_handler = CheckpointHandler(
        save_folder=to_absolute_path(cfg.checkpointing.save_folder),
        best_val_model_name=cfg.checkpointing.best_val_model_name,
        checkpoint_per_iteration=cfg.checkpointing.checkpoint_per_iteration,
        save_n_recent_models=cfg.checkpointing.save_n_recent_models)

    # Whether to use visdom or tensorboard
    if main_proc and cfg.visualization.visdom:
        visdom_logger = VisdomLogger(id=cfg.visualization.id,
                                     num_epochs=cfg.training.epochs)
    if main_proc and cfg.visualization.tensorboard:
        tensorboard_logger = TensorBoardLogger(
            id=cfg.visualization.id,
            log_dir=to_absolute_path(cfg.visualization.log_dir),
            log_params=cfg.visualization.log_params)

    if cfg.checkpointing.load_auto_checkpoint:
        latest_checkpoint = checkpoint_handler.find_latest_checkpoint()
        if latest_checkpoint:
            cfg.checkpointing.continue_from = latest_checkpoint

    # From here on
    if cfg.checkpointing.continue_from:  # Starting from previous model
        state = TrainingState.load_state(
            state_path=to_absolute_path(cfg.checkpointing.continue_from))
        model = state.model
        if cfg.training.finetune:
            state.init_finetune_states(cfg.training.epochs)

        if main_proc and cfg.visualization.visdom:  # Add previous scores to visdom graph
            visdom_logger.load_previous_values(state.epoch, state.results)
        if main_proc and cfg.visualization.tensorboard:  # Previous scores to tensorboard logs
            tensorboard_logger.load_previous_values(state.epoch, state.results)
    else:  # Initialise new model training
        with open(to_absolute_path(cfg.data.labels_path)) as label_file:
            labels = json.load(label_file)  # labels (a, b, c, ...)

        audio_conf = dict(sample_rate=cfg.data.sample_rate,
                          window_size=cfg.data.window_size,
                          window_stride=cfg.data.window_stride,
                          window=cfg.data.window)
        if cfg.augmentation.noise_dir:
            audio_conf.update(dict(noise_dir=to_absolute_path(cfg.augmentation.noise_dir),
                                   noise_prob=cfg.augmentation.noise_prob,
                                   noise_levels=(cfg.augmentation.noise_min,
                                                 cfg.augmentation.noise_max)))

        rnn_type = cfg.model.rnn_type.lower()
        assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"

        # Create the DeepSpeech model
        model = DeepSpeech(rnn_hidden_size=cfg.model.hidden_size,
                           nb_layers=cfg.model.hidden_layers,
                           labels=labels,
                           rnn_type=supported_rnns[rnn_type],
                           audio_conf=audio_conf,
                           bidirectional=cfg.model.bidirectional)
        state = TrainingState(model=model)

    state.init_results_tracking(epochs=cfg.training.epochs)

    # Data setup
    evaluation_decoder = GreedyDecoder(model.labels)  # Decoder used for validation

    # Organise the data paths
    train_dataset = SpectrogramDataset(
        audio_conf=model.audio_conf,
        manifest_filepath=to_absolute_path(cfg.data.train_manifest),
        labels=model.labels,
        normalize=True,
        speed_volume_perturb=cfg.augmentation.speed_volume_perturb,
        spec_augment=cfg.augmentation.spec_augment)
    test_dataset = SpectrogramDataset(audio_conf=model.audio_conf,
                                      manifest_filepath=to_absolute_path(cfg.data.val_manifest),
                                      labels=model.labels,
                                      normalize=True,
                                      speed_volume_perturb=False,
                                      spec_augment=False)
    if not is_distributed:
        train_sampler = DSRandomSampler(dataset=train_dataset,
                                        batch_size=cfg.data.batch_size,
                                        start_index=state.training_step)
    else:
        train_sampler = DSElasticDistributedSampler(
            dataset=train_dataset,
            batch_size=cfg.data.batch_size,
            start_index=state.training_step)

    # Data loading section
    train_loader = AudioDataLoader(dataset=train_dataset,
                                   num_workers=cfg.data.num_workers,
                                   batch_sampler=train_sampler)
    test_loader = AudioDataLoader(dataset=test_dataset,
                                  num_workers=cfg.data.num_workers,
                                  batch_size=cfg.data.batch_size)

    model = model.to(device)
    parameters = model.parameters()
    if cfg.optimizer.adam:
        optimizer = torch.optim.AdamW(parameters,
                                      lr=cfg.optimizer.learning_rate,
                                      betas=cfg.optimizer.betas,
                                      eps=cfg.optimizer.eps,
                                      weight_decay=cfg.optimizer.weight_decay)
    else:
        optimizer = torch.optim.SGD(parameters,
                                    lr=cfg.optimizer.learning_rate,
                                    momentum=cfg.optimizer.momentum,
                                    nesterov=True,
                                    weight_decay=cfg.optimizer.weight_decay)
    model, optimizer = amp.initialize(model, optimizer,
                                      opt_level=cfg.apex.opt_level,
                                      loss_scale=cfg.apex.loss_scale)
    if state.optim_state is not None:
        optimizer.load_state_dict(state.optim_state)
        amp.load_state_dict(state.amp_state)

    # Track states for optimizer/amp
    state.track_optim_state(optimizer)
    state.track_amp_state(amp)

    if is_distributed:
        model = DistributedDataParallel(model, device_ids=[device_id])
    print(model)
    print("Number of parameters: %d" % DeepSpeech.get_param_size(model))

    criterion = CTCLoss()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    for epoch in range(state.epoch, cfg.training.epochs):
        model.train()
        end = time.time()
        start_epoch_time = time.time()
        state.set_epoch(epoch=epoch)
        train_sampler.set_epoch(epoch=epoch)
        train_sampler.reset_training_step(training_step=state.training_step)
        # Use the available training data
        for i, (data) in enumerate(train_loader, start=state.training_step):
            state.set_training_step(training_step=i)
            inputs, targets, input_percentages, target_sizes = data
            input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            # measure data loading time
            data_time.update(time.time() - end)
            inputs = inputs.to(device)

            out, output_sizes = model(inputs, input_sizes)
            out = out.transpose(0, 1)  # TxNxH

            float_out = out.float()  # ensure float32 for loss
            loss = criterion(float_out, targets, output_sizes, target_sizes).to(device)
            loss = loss / inputs.size(0)  # average the loss by minibatch
            loss_value = loss.item()

            # Check to ensure valid loss was calculated
            valid_loss, error = check_loss(loss, loss_value)
            if valid_loss:
                optimizer.zero_grad()

                # compute gradient
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
                                               cfg.optimizer.max_norm)
                optimizer.step()
            else:
                print(error)
                print('Skipping grad update')
                loss_value = 0

            state.avg_loss += loss_value
            losses.update(loss_value, inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      (epoch + 1), (i + 1), len(train_loader),
                      batch_time=batch_time, data_time=data_time, loss=losses))

            if main_proc and cfg.checkpointing.checkpoint_per_iteration:
                checkpoint_handler.save_iter_checkpoint_model(epoch=epoch, i=i, state=state)
            del loss, out, float_out

        state.avg_loss /= len(train_dataset)

        epoch_time = time.time() - start_epoch_time
        print('Training Summary Epoch: [{0}]\t'
              'Time taken (s): {epoch_time:.0f}\t'
              'Average Loss {loss:.3f}\t'.format(epoch + 1,
                                                 epoch_time=epoch_time,
                                                 loss=state.avg_loss))

        with open("/home/jhjeong/jiho_deep/deepspeech.pytorch/jiho_result/result.txt", "a") as ff:
            ff.write("\n")
            ff.write("train -> ")
            ff.write("epoch : ")
            ff.write(str(epoch + 1))
            ff.write(" loss : ")
            ff.write(str(state.avg_loss))
            ff.write("\n")

        with torch.no_grad():
            wer, cer, output_data = evaluate(test_loader=test_loader,
                                             device=device,
                                             model=model,
                                             decoder=evaluation_decoder,
                                             target_decoder=evaluation_decoder)

        state.add_results(epoch=epoch,
                          loss_result=state.avg_loss,
                          wer_result=wer,
                          cer_result=cer)

        print('Validation Summary Epoch: [{0}]\t'
              'Average CER {cer:.3f}\t'.format(epoch + 1, cer=cer))

        with open("/home/jhjeong/jiho_deep/deepspeech.pytorch/jiho_result/result.txt", "a") as ff:
            ff.write("\n")
            ff.write("val -> ")
            ff.write("epoch : ")
            ff.write(str(epoch + 1))
            ff.write(" cer : ")
            ff.write(str(cer))
            ff.write("\n")

        # Update visdom/tensorboard
        if main_proc and cfg.visualization.visdom:
            visdom_logger.update(epoch, state.result_state)
        if main_proc and cfg.visualization.tensorboard:
            tensorboard_logger.update(epoch, state.result_state, model.named_parameters())

        if main_proc and cfg.checkpointing.checkpoint:  # Save epoch checkpoint
            checkpoint_handler.save_checkpoint_model(epoch=epoch, state=state)

        # anneal lr
        for g in optimizer.param_groups:
            g['lr'] = g['lr'] / cfg.optimizer.learning_anneal
        print('Learning rate annealed to: {lr:.6f}'.format(lr=g['lr']))

        if main_proc and (state.best_wer is None or state.best_wer > wer):
            checkpoint_handler.save_best_model(epoch=epoch, state=state)
            state.set_best_wer(wer)

        state.reset_avg_loss()
        state.reset_training_step()  # Reset training step for next epoch
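# Each of the training loops above ends an epoch by dividing every param
# group's lr by learning_anneal, i.e. after k epochs lr_k = lr_0 / learning_anneal**k.
# A tiny standalone illustration with hypothetical values (lr_0 = 3e-4,
# learning_anneal = 1.1), not taken from any of the configs above:
initial_lr, learning_anneal = 3e-4, 1.1
lr = initial_lr
for epoch in range(5):
    lr = lr / learning_anneal
    print(f"epoch {epoch + 1}: lr = {lr:.6f}")  # 0.000273, 0.000248, 0.000225, ...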
def val_dataloader(self):
    val_dataset = self._create_dataset(self.val_path)
    val_loader = AudioDataLoader(dataset=val_dataset,
                                 num_workers=self.data_cfg.num_workers,
                                 batch_size=self.data_cfg.batch_size)
    return val_loader