Example #1
    def set_model(self, inference=False, with_head=False, from_path=None, output_attention=False):
        self.verbose('Initializing Mockingjay model.')
        
        # Build the Mockingjay model with the speech prediction head
        self.model_config = MockingjayConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.output_attention = output_attention
        
        if not inference or with_head:
            self.model = MockingjayForMaskedAcousticModel(self.model_config, self.input_dim, self.output_dim, self.output_attention).to(self.device)
            self.verbose('Number of parameters: ' + str(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
            self.mockingjay = self.model.Mockingjay

        if inference and not with_head:
            self.mockingjay = MockingjayModel(self.model_config, self.input_dim, self.output_attention).to(self.device)
            self.verbose('Number of parameters: ' + str(sum(p.numel() for p in self.mockingjay.parameters() if p.requires_grad)))
            self.mockingjay.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

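            # Exclude biases and LayerNorm parameters from weight decay, following standard BERT practice.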
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
                ]
            num_train_optimization_steps = self.total_steps // self.gradient_accumulation_steps

            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(optimizer, static_loss_scale=self.config['optimizer']['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(warmup=self.warmup_proportion,
                                                          t_total=num_train_optimization_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                        lr=self.learning_rate,
                                        warmup=self.warmup_proportion,
                                        t_total=num_train_optimization_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load: # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference, with_head=with_head, from_path=from_path)
Example #2
def get_mockingjay_optimizer(params, lr, warmup_proportion, training_steps):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in params if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in params if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=lr,
                         warmup=warmup_proportion,
                         t_total=training_steps)
    return optimizer
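
A minimal usage sketch for the helper above; the model, learning rate, and step counts below are illustrative stand-ins (not values from the repo), and `BertAdam` is assumed to be importable as in the example itself:

import torch

# Hypothetical two-layer model standing in for the Mockingjay encoder.
model = torch.nn.Sequential(torch.nn.Linear(80, 768), torch.nn.Linear(768, 768))
optimizer = get_mockingjay_optimizer(params=list(model.named_parameters()),
                                     lr=4e-4,                 # illustrative learning rate
                                     warmup_proportion=0.07,  # fraction of steps spent warming up
                                     training_steps=500000)   # total optimizer steps (t_total)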
Example #3
class Solver():
    ''' Super class Solver for all kinds of tasks'''
    def __init__(self, config, paras):

        # General Settings
        self.config = config
        self.paras = paras
        self.device = torch.device('cuda') if (
            self.paras.gpu
            and torch.cuda.is_available()) else torch.device('cpu')
        if torch.cuda.is_available(): self.verbose('CUDA is available!')

        # path and directories
        self.exp_name = paras.name
        if self.exp_name is None:
            self.exp_name = '_'.join([
                paras.config.split('/')[-1].replace('.yaml', ''),
                'sd' + str(paras.seed)
            ])
        self.ckpdir = os.path.join(paras.ckpdir, self.exp_name)

        self.load = paras.load
        # checkpoint path (used only for testing)
        self.ckpt = os.path.join(paras.ckpdir, paras.ckpt)

        # model
        self.load_model_list = config['solver']['load_model_list']
        self.duo_feature = config['solver']['duo_feature']
        self.output_dim = num_freq if self.duo_feature else None  # output dim is the same as input dim if not using duo features
        if 'input_dim' in config['mockingjay']:
            self.input_dim = config['mockingjay']['input_dim']
            self.verbose('Using `input_dim` setting from config.')
        else:
            # Infer the input dim from the data path when it is not given in the config.
            data_path = self.config['dataloader']['data_path']
            self.input_dim = fmllr_dim if 'fmllr' in data_path else \
                             mfcc_dim if 'mfcc' in data_path else mel_dim

    def verbose(self, msg, end='\n'):
        ''' Verbose function for print information to stdout'''
        if self.paras.verbose:
            print('[SOLVER] - ', msg, end=end)

    def load_data(self, split='train', load_mel_only=False):
        ''' Load data for training / testing'''
        if split == 'train':
            self.verbose('Loading source data ' +
                         str(self.config['dataloader']['train_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
            if self.duo_feature:
                self.verbose('Loading target data ' +
                             str(self.config['dataloader']['train_set']) +
                             ' from ' +
                             self.config['dataloader']['target_path'])
        elif split == 'test':
            self.verbose('Loading testing data ' +
                         str(self.config['dataloader']['test_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
        else:
            raise NotImplementedError('Invalid `split` argument!')

        if self.duo_feature and not load_mel_only:
            # The duo-feature dataloader currently supports Mockingjay training only,
            # so there is no need to specify `run_mockingjay`.
            self.dataloader = get_Dataloader(split, load='duo', use_gpu=self.paras.gpu,
                                             mock_config=self.config['mockingjay'],
                                             **self.config['dataloader'])
        else:
            # Specify `run_mockingjay` so the dataloader will process Mockingjay MAM data.
            self.dataloader = get_Dataloader(split, load='acoustic', use_gpu=self.paras.gpu,
                                             run_mockingjay=not load_mel_only,
                                             mock_config=self.config['mockingjay'],
                                             **self.config['dataloader'])

    def set_model(self,
                  inference=False,
                  with_head=False,
                  from_path=None,
                  output_attention=False):
        self.verbose('Initializing Mockingjay model.')

        # Build the Mockingjay model with the speech prediction head
        self.model_config = MockingjayConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.with_head = with_head
        self.output_attention = output_attention

        if not inference or with_head:
            self.model = MockingjayForMaskedAcousticModel(
                self.model_config, self.input_dim, self.output_dim,
                self.output_attention).to(self.device)
            self.mockingjay = self.model.Mockingjay
            if self.paras.multi_gpu:
                self.model = torch.nn.DataParallel(self.model)
                self.mockingjay = torch.nn.DataParallel(self.mockingjay)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        if inference and not with_head:
            self.mockingjay = MockingjayModel(
                self.model_config, self.input_dim,
                self.output_attention).to(self.device)
            if self.paras.multi_gpu:
                self.mockingjay = torch.nn.DataParallel(self.mockingjay)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.mockingjay.parameters() if p.requires_grad)))
            self.mockingjay.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0}
            ]

            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                    )

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(
                        optimizer, static_loss_scale=self.config['optimizer']['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(
                    warmup=self.warmup_proportion, t_total=self.total_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                          lr=self.learning_rate,
                                          warmup=self.warmup_proportion,
                                          t_total=self.total_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load:  # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference,
                            with_head=with_head,
                            from_path=from_path)

    def save_model(self, name, model_all=True):
        if model_all:
            all_states = {
                'SpecHead': self.model.SpecHead.state_dict() if not self.paras.multi_gpu
                            else self.model.module.SpecHead.state_dict(),
                'Mockingjay': self.mockingjay.state_dict() if not self.paras.multi_gpu
                              else self.mockingjay.module.state_dict(),
                'Optimizer': self.optimizer.state_dict(),
                'Global_step': self.global_step,
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        else:
            all_states = {
                'Mockingjay': self.mockingjay.state_dict() if not self.paras.multi_gpu
                              else self.mockingjay.module.state_dict(),
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        new_model_path = '{}/{}-{}.ckpt'.format(self.ckpdir, name,
                                                self.global_step)
        torch.save(all_states, new_model_path)
        self.model_kept.append(new_model_path)

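        # Rotate checkpoints: remove the oldest once the kept list reaches `max_keep`.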
        if len(self.model_kept) >= self.max_keep:
            os.remove(self.model_kept[0])
            self.model_kept.pop(0)

    def load_model(self, inference=False, with_head=False, from_path=None):
        if from_path is not None:
            self.verbose('Load model from {}'.format(from_path))
            all_states = torch.load(from_path, map_location='cpu')
            self.load_model_list = ['Mockingjay']
        else:
            self.verbose('Load model from {}'.format(self.ckpt))
            all_states = torch.load(self.ckpt, map_location='cpu')

        if 'SpecHead' in self.load_model_list:
            if not inference or with_head:
                try:
                    if not self.paras.multi_gpu:
                        self.model.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    else:
                        self.model.module.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    self.verbose('[SpecHead] - Loaded')
                except Exception:
                    self.verbose('[SpecHead - X]')

        if 'Mockingjay' in self.load_model_list:
            try:
                state_dict = all_states['Mockingjay']
                # Load from a PyTorch state_dict
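                # Older checkpoints store LayerNorm parameters as `gamma`/`beta`;
                # rename them to the current `weight`/`bias` convention.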
                old_keys = []
                new_keys = []
                for key in state_dict.keys():
                    new_key = None
                    if 'gamma' in key:
                        new_key = key.replace('gamma', 'weight')
                    if 'beta' in key:
                        new_key = key.replace('beta', 'bias')
                    if new_key:
                        old_keys.append(key)
                        new_keys.append(new_key)
                for old_key, new_key in zip(old_keys, new_keys):
                    state_dict[new_key] = state_dict.pop(old_key)

                missing_keys = []
                unexpected_keys = []
                error_msgs = []
                # copy state_dict so _load_from_state_dict can modify it
                metadata = getattr(state_dict, '_metadata', None)
                state_dict = state_dict.copy()
                if metadata is not None:
                    state_dict._metadata = metadata

                def load(module, prefix=''):
                    local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
                    module._load_from_state_dict(state_dict, prefix,
                                                 local_metadata, True,
                                                 missing_keys, unexpected_keys,
                                                 error_msgs)
                    for name, child in module._modules.items():
                        if child is not None:
                            load(child, prefix + name + '.')

                # perform load
                if not self.paras.multi_gpu:
                    load(self.mockingjay)
                else:
                    load(self.mockingjay.module)

                if len(missing_keys) > 0:
                    self.verbose(
                        "Weights of {} not initialized from pretrained model: {}"
                        .format(self.mockingjay.__class__.__name__,
                                missing_keys))
                if len(unexpected_keys) > 0:
                    self.verbose(
                        "Weights from pretrained model not used in {}: {}".
                        format(self.mockingjay.__class__.__name__,
                               unexpected_keys))
                if len(error_msgs) > 0:
                    raise RuntimeError(
                        'Error(s) in loading state_dict for {}:\n\t{}'.format(
                            self.mockingjay.__class__.__name__,
                            "\n\t".join(error_msgs)))
                self.verbose('[Mockingjay] - Loaded')
            except Exception:
                self.verbose('[Mockingjay - X]')

        if 'Optimizer' in self.load_model_list and not inference:
            try:
                self.optimizer.load_state_dict(all_states['Optimizer'])
                for state in self.optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.cuda()
                self.verbose('[Optimizer] - Loaded')
            except Exception:
                self.verbose('[Optimizer - X]')

        if 'Global_step' in self.load_model_list and not inference:
            try:
                self.global_step = all_states['Global_step']
                self.verbose('[Global_step] - Loaded')
            except Exception:
                self.verbose('[Global_step - X]')

        self.verbose('Model loading complete!')

    def up_sample_frames(self, spec, return_first=False):
        if len(spec.shape) != 3:
            spec = spec.unsqueeze(0)
            assert len(spec.shape) == 3, 'Input should have acoustic feature of shape BxTxD'
        # spec shape: [batch_size, sequence_length // downsample_rate, output_dim * downsample_rate]
        spec_flatten = spec.view(spec.shape[0], spec.shape[1] * self.dr,
                                 spec.shape[2] // self.dr)
        if return_first: return spec_flatten[0]
        return spec_flatten  # spec_flatten shape: [batch_size, sequence_length * downsample_rate, output_dim // downsample_rate]

    def down_sample_frames(self, spec):
        left_over = spec.shape[1] % self.dr
        if left_over != 0: spec = spec[:, :-left_over, :]
        spec_stacked = spec.view(spec.shape[0], spec.shape[1] // self.dr,
                                 spec.shape[2] * self.dr)
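        # spec_stacked shape: [batch_size, sequence_length // downsample_rate, feature_dim * downsample_rate]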
        return spec_stacked

    def position_encoding(self, seq_len, batch_size=None, padding_idx=None):
        ''' Sinusoid position encoding table '''
        def cal_angle(position, hid_idx):
            return position / np.power(10000, 2 * (hid_idx // 2) / self.hidden_size)

        def get_posi_angle_vec(position):
            return [
                cal_angle(position, hid_j) for hid_j in range(self.hidden_size)
            ]

        sinusoid_table = np.array(
            [get_posi_angle_vec(pos_i) for pos_i in range(seq_len)])

        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

        if padding_idx is not None:
            sinusoid_table[padding_idx:] = 0.  # zero vectors for padded positions

        if batch_size is not None:
            batch_sinusoid_table = np.repeat(sinusoid_table[np.newaxis, ...],
                                             batch_size,
                                             axis=0)
            return batch_sinusoid_table  # (batch_size, seq_len, hidden_size)
        else:
            return sinusoid_table  # (seq_len, hidden_size)
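
The sinusoid table above can be reproduced as a standalone function; this is a minimal sketch for sanity-checking shapes, with an illustrative `hidden_size` rather than a value from any config:

import numpy as np

def sinusoid_position_encoding(seq_len, hidden_size):
    # angle(pos, i) = pos / 10000^(2 * (i // 2) / hidden_size)
    table = np.array([[pos / np.power(10000, 2 * (i // 2) / hidden_size)
                       for i in range(hidden_size)] for pos in range(seq_len)])
    table[:, 0::2] = np.sin(table[:, 0::2])  # even dimensions use sine
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd dimensions use cosine
    return table

print(sinusoid_position_encoding(seq_len=8, hidden_size=4).shape)  # (8, 4)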