    def __init__(self, options, inp_dim, config=None):
        super(TRANSFORMER, self).__init__()

        if config is not None:
            self.config = yaml.load(open(config, 'r'), Loader=yaml.FullLoader)
        else:
            all_states = torch.load(options["ckpt_file"], map_location='cpu')
            self.config = all_states['Settings']['Config']

        self.no_grad = bool(strtobool(options['no_grad']))
        self.spec_aug = bool(strtobool(options['spec_aug']))
        self.spec_aug_prev = bool(strtobool(options['spec_aug_prev']))
        self.weighted_sum = bool(strtobool(options['weighted_sum']))
        self.select_layer = int(options['select_layer'])
        if (not self.no_grad) and (not self.spec_aug_prev):
            raise RuntimeError('Only one of `no_grad` and `spec_aug_prev` can be set to False!')

        # increase dropout
        if str(options['dropout']) != 'default':
            self.config['transformer']['hidden_dropout_prob'] = float(
                options['dropout'])
            self.config['transformer']['attention_probs_dropout_prob'] = float(
                options['dropout'])

        # Model Config
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.num_layers = self.model_config.num_hidden_layers
        self.max_input_length = self.config['transformer'].get('max_input_length', 0)
        if self.max_input_length > 0:
            print('[Transformer] - Maximum input length: ',
                  self.max_input_length)
        if self.select_layer not in range(-1, self.num_layers):
            raise RuntimeError("Out of range int for 'select_layer'!")

        # use weighted sum from all layers
        if self.weighted_sum:
            self.weight = nn.Parameter(
                torch.ones(self.num_layers) / self.num_layers)
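            # initialized to a uniform average over all layers; a softmax over these
            # weights (applied in the forward pass) keeps the layer combination convex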

        # Build model
        self.device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.model = TransformerModel(self.model_config,
                                      inp_dim).to(self.device)
        self.model.eval() if self.no_grad else self.model.train()

        # Load from a PyTorch state_dict
        load = bool(strtobool(options["load_pretrain"]))
        if load:
            self.load_model(all_states['Transformer'])
            print('[Transformer] - Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        self.out_dim = self.hidden_size  # e.g. 768; this attribute is needed by pytorch-kaldi and the downstream runner
        self.permute_input = True  # Used by the forward method: if True the input/output is of shape (T, B, D), if False (B, T, D)
Example #2
class TRANSFORMER(TransformerBaseWrapper):
    """
    Use this class to extract features from the Transformer model,
    or to finetune the pre-trained Transformer on any downstream task.
    Also, this class is `pytorch-kaldi` ready,
    hence we use `str` instead of `bool` in the options dict,
    as pytorch-kaldi scripts will pass in str.

    Params:
        `options`: a python dictionary containing the following keys:
            ckpt_file: str, a path specifying the pre-trained ckpt file
            load_pretrain: str, ['True', 'False'], whether to load pre-trained weights
            no_grad: str, ['True', 'False'], whether to have gradient flow over this class
            dropout: float/str, use float to modify dropout value during downstream finetune, or use the str `default` for pre-train default values
            spec_aug: str, ['True', 'False'], whether to apply SpecAugment on inputs (used for ASR training)
            spec_aug_prev: str, ['True', 'False'], apply spec augment on input acoustic features if True, else apply on output representations (used for ASR training)
            weighted_sum: str, ['True', 'False'], whether to use a learnable weighted sum to integrate hidden representations from all layers, if False then use the last
            select_layer: int, select from all hidden representations, set to -1 to select the last (will only be used when weighted_sum is False)
            permute_input: str, ['True', 'False'], this attribute is for the forward method. If True then the input/output is of shape (T, B, D), if False then (B, T, D)
        `inp_dim`: int, input dimension of the model
        `config`: optional, read the given yaml config instead of the config stored in `ckpt_file`

    An example `options` dictionary:
    options = {
        'ckpt_file'     : './result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/states-1000000.ckpt',
        'load_pretrain' : 'True',
        'no_grad'       : 'True',
        'dropout'       : 'default',
        'spec_aug'      : 'False',
        'spec_aug_prev' : 'True',
        'weighted_sum'  : 'False',
        'select_layer'  : -1,
        'permute_input' : 'False',
    }
    """
    def __init__(self, options, inp_dim, config=None, online_config=None):
        super(TRANSFORMER, self).__init__(options, inp_dim, config, online_config)

        # Build model
        self.model = TransformerModel(self.model_config, self.inp_dim).to(self.device)
        self.model.eval() if self.no_grad else self.model.train()
        self.out_dim = self.hidden_size # This attribute is necessary for pytorch-kaldi and run_downstream.py
        
        # Load from a PyTorch state_dict
        if self.load: 
            self.model = self.load_model(self.model, self.all_states['Transformer'])
            print('[Transformer] - Number of parameters: ' + str(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))


    def forward(self, x):
        if hasattr(self, 'preprocessor'):
            x = self.preprocessor(x.transpose(1, 2).contiguous())[0]
        if self.no_grad:
            with torch.no_grad():
                x = self._forward(x)
        else:
            x = self._forward(x)
        return x
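
A minimal usage sketch (not part of the original examples): it reuses the placeholder checkpoint path from the docstring above and assumes 40-dimensional input features; adjust both to your own setup.

import torch

options = {'ckpt_file': './result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/states-1000000.ckpt',
           'load_pretrain': 'True', 'no_grad': 'True', 'dropout': 'default',
           'spec_aug': 'False', 'spec_aug_prev': 'True', 'weighted_sum': 'False',
           'select_layer': -1, 'permute_input': 'False'}
extracter = TRANSFORMER(options=options, inp_dim=40)  # inp_dim = 40 is an assumed feature dimension
feats = torch.randn(8, 1200, 40)                      # (B, T, D) since permute_input is 'False'
reps = extracter(feats)                               # (B, T, extracter.out_dim) speech representations
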
Example #3
    def __init__(self, *args, embedding_rank=None, inner_rank=None, ffward_rank=None, **kwargs):
        if gconfig.model == "transformer":
            TransformerModel.__init__(self, *args, embedding_rank, inner_rank, ffward_rank, **kwargs)
        else:
            super(PrunedModel, self).__init__(*args, **kwargs)
        self.currently_pruned = False
        parameter_keys = [param[0] for param in self.named_parameters()]
        self.parameter_prefixes = list(
            set([name.split(".")[0] for name in parameter_keys]
                + [".".join(name.split(".")[:2]) for name in parameter_keys]
                + [".".join(name.split(".")[:-1]) for name in parameter_keys]))
Example #4
    def __init__(self, options, inp_dim, config=None, online_config=None):
        super(TRANSFORMER, self).__init__(options, inp_dim, config, online_config)

        # Build model
        self.model = TransformerModel(self.model_config, self.inp_dim).to(self.device)
        self.model.eval() if self.no_grad else self.model.train()
        self.out_dim = self.hidden_size # This attribute is necessary for pytorch-kaldi and run_downstream.py
        
        # Load from a PyTorch state_dict
        if self.load: 
            self.model = self.load_model(self.model, self.all_states['Transformer'])
            print('[Transformer] - Number of parameters: ' + str(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
    def __init__(self,
                 config,
                 input_dim,
                 output_attentions=False,
                 keep_multihead_output=False):
        super(TransformerPhoneticEncoder,
              self).__init__(config, output_attentions)
        self.Transformer = TransformerModel(
            config,
            input_dim,
            output_attentions=output_attentions,
            keep_multihead_output=keep_multihead_output)
        if config.phone_type == 'l2':
            self.PhoneRecognizer = VectorQuantizeLayer_L2(
                config.hidden_size, config.phone_size, config.phone_dim)
        elif config.phone_type == 'gb':
            self.PhoneRecognizer = VectorQuantizeLayer_GB(
                config.hidden_size, config.phone_size, config.phone_dim)
        elif config.phone_type == 'gst':
            self.PhoneRecognizer = GlobalStyleTokenLayer(
                config.hidden_size, config.phone_size, config.phone_dim)
        elif config.phone_type == 'linear':
            self.PhoneRecognizer = LinearLayer(config.hidden_size,
                                               config.phone_dim)
        elif config.phone_type == 'none':
            self.PhoneRecognizer = DummyLayer(config.phone_dim)
        else:
            raise NotImplementedError

        self.apply(self.init_Transformer_weights)
        self.out_dim = self.PhoneRecognizer.out_dim
Example #6
def main(args):

    wand_logger = WandbLogger(offline=False,
                              project='Transformer',
                              save_dir='./lightning_logs/')
    wand_logger.log_hyperparams(params=args)

    checkpoint = ModelCheckpoint(
        filepath='./lightning_logs/checkpoints/checkpoints',
        monitor='val_loss',
        verbose=0,
        save_top_k=2)

    model = TransformerModel(**vars(args))
    trainer = Trainer(
        logger=wand_logger,
        early_stop_callback=False,
        checkpoint_callback=checkpoint,
        # fast_dev_run=True,
        # overfit_pct=0.03,
        # profiler=True,
        auto_lr_find=False,
        # val_check_interval=1.0,
        # log_save_interval=50000,
        # row_log_interval=50000,
        max_epochs=args.epochs,
        min_epochs=1,
    )
    # lr_finder = trainer.lr_find(model)
    # print(lr_finder.results)
    trainer.fit(model)
    def __init__(self,
                 config,
                 input_dim,
                 output_attentions=False,
                 keep_multihead_output=False,
                 with_recognizer=True):
        super(TransformerSpeakerEncoder,
              self).__init__(config, output_attentions)
        self.Transformer = TransformerModel(
            config,
            input_dim,
            output_attentions=output_attentions,
            keep_multihead_output=keep_multihead_output)
        if config.speaker_type == 'gst' and with_recognizer:
            self.SpeakerRecognizer = GlobalStyleTokenLayer(
                config.hidden_size, config.speaker_size, config.speaker_dim)
        elif config.speaker_type == 'linear' and with_recognizer:
            self.SpeakerRecognizer = LinearLayer(config.hidden_size,
                                                 config.speaker_dim)
        elif config.speaker_type == 'none' and with_recognizer:
            with_recognizer = False
        elif with_recognizer:
            raise NotImplementedError

        self.average_pooling = config.average_pooling
        self.with_recognizer = with_recognizer
        self.apply(self.init_Transformer_weights)
        self.out_dim = self.SpeakerRecognizer.out_dim if with_recognizer else config.hidden_size
    def __init__(self,
                 config,
                 input_dim,
                 output_dim,
                 output_attentions=False,
                 keep_multihead_output=False):
        super(DualTransformerForMaskedAcousticModel,
              self).__init__(config, output_attentions)

        assert config.dual_transformer, 'This config attribute should be set to True!'
        self.use_pe = config.intermediate_pe
        self.combine = config.combine
        self.phone_dim = config.phone_dim
        self.speaker_dim = config.speaker_dim
        if config.combine == 'concat':
            code_dim = self.phone_dim + self.speaker_dim
        elif config.combine == 'add':
            assert self.phone_dim == self.speaker_dim
            code_dim = self.phone_dim
        else:
            raise NotImplementedError

        self.SPE = nn.Parameter(
            torch.FloatTensor([1.0])
        )  # Scaled positional encoding (SPE) introduced in https://arxiv.org/abs/1809.08895
        self.SpecTransformer = TransformerModel(
            config,
            input_dim=code_dim,
            output_attentions=output_attentions,
            keep_multihead_output=keep_multihead_output,
            with_input_module=True if self.use_pe else False)
        self.SpecHead = TransformerSpecPredictionHead(
            config, output_dim if output_dim is not None else input_dim)

        if self.phone_dim > 0:
            self.PhoneticTransformer = TransformerPhoneticEncoder(
                config, input_dim, output_attentions, keep_multihead_output)
        if self.speaker_dim > 0:
            self.SpeakerTransformer = TransformerSpeakerEncoder(
                config, input_dim, output_attentions, keep_multihead_output)

        if len(config.pre_train) > 0:
            all_states = torch.load(config.pre_train, map_location='cpu')
            if self.phone_dim > 0:
                self.PhoneticTransformer.Transformer = load_model(
                    self.PhoneticTransformer.Transformer,
                    all_states['Transformer'])
            if self.speaker_dim > 0:
                self.SpeakerTransformer.Transformer = load_model(
                    self.SpeakerTransformer.Transformer,
                    all_states['Transformer'])

        self.apply(self.init_Transformer_weights)
        self.loss = nn.L1Loss()
Example #9
def get_args():
    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = ArgumentParser(add_help=False)
    parent_parser.add_argument('--seed', default=1, type=int)
    parent_parser.add_argument('--epochs', default=20, type=int)
    parent_parser.add_argument('--batch_size', default=64, type=int)
    parent_parser.add_argument('--learning_rate', default=0.00275, type=float)
    parent_parser.add_argument('--gpus', default=None)
    parent_parser.add_argument('--tpu_cores', default=None)
    parent_parser.add_argument('--num_workers', default=8)
    parser = TransformerModel.add_model_specific_args(parent_parser, root_dir)
    return parser.parse_args()
Example #10
def decode(load_from=None):
    """
    Performs decoding on a test set and saves the best-scoring decoding results.
    If the target gold-standard sentences are given, the function also computes
    the corpus-level BLEU score.
    """

    if gconfig.test:
        data_src = read_corpus(paths.test_source, source='src')
        data_tgt = read_corpus(paths.test_target, source='tgt')
        data_tgt_path = paths.test_target
    else:
        data_src = read_corpus(paths.dev_source, source='src')
        data_tgt = read_corpus(paths.dev_target, source='tgt')
        data_tgt_path = paths.dev_target

    print(f"load model from {paths.model}", file=sys.stderr)
    if load_from is not None:
        model_load_path = load_from
    else:
        model_load_path = paths.model

    model = TransformerModel.load(model_load_path)
    if gconfig.cuda:
        model.to_gpu()
    model.eval()
    max_step = dconfig.max_decoding_time_step
    if gconfig.sanity:
        max_step = 2

    hypotheses = routine.batch_beam_search(model,
                                           data_src,
                                           max_step,
                                           batch_size=dconfig.batch_size,
                                           replace=dconfig.replace)

    lines = []
    for src_sent, hyps in zip(data_src, hypotheses):
        top_hyp = hyps[0]
        lines.append(top_hyp.value)
    write_sents(lines, paths.decode_output)

    bleu_command = "perl scripts/multi-bleu.perl " + data_tgt_path + " < " + paths.decode_output
    os.system(bleu_command)
class TRANSFORMER(nn.Module):
    def __init__(self, options, inp_dim, config=None):
        super(TRANSFORMER, self).__init__()

        if config is not None:
            self.config = yaml.load(open(config, 'r'), Loader=yaml.FullLoader)
        else:
            all_states = torch.load(options["ckpt_file"], map_location='cpu')
            self.config = all_states['Settings']['Config']

        self.no_grad = bool(strtobool(options['no_grad']))
        self.spec_aug = bool(strtobool(options['spec_aug']))
        self.spec_aug_prev = bool(strtobool(options['spec_aug_prev']))
        self.weighted_sum = bool(strtobool(options['weighted_sum']))
        self.select_layer = int(options['select_layer'])
        if (not self.no_grad) and (not self.spec_aug_prev): raise RuntimeError('Only one of `no_grad` and `spec_aug_prev` can be set to False!')
        
        # increase dropout
        if str(options['dropout']) != 'default':
            self.config['transformer']['hidden_dropout_prob'] = float(options['dropout'])
            self.config['transformer']['attention_probs_dropout_prob'] = float(options['dropout'])

        # Model Config
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.num_layers = self.model_config.num_hidden_layers
        self.max_input_length = self.config['transformer'].get('max_input_length', 0)
        if self.max_input_length > 0: print('[Transformer] - Maximum input length: ', self.max_input_length)
        if self.select_layer not in range(-1, self.num_layers): raise RuntimeError("Out of range int for 'select_layer'!")

        # use weighted sum from all layers
        if self.weighted_sum:
            self.weight = nn.Parameter(torch.ones(self.num_layers) / self.num_layers)

        # Build model
        self.inp_dim = inp_dim if inp_dim > 0 else self.config['transformer']['input_dim']
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.model = TransformerModel(self.model_config, self.inp_dim).to(self.device)
        self.model.eval() if self.no_grad else self.model.train()
        
        # Load from a PyTorch state_dict
        load = bool(strtobool(options["load_pretrain"]))
        if load: 
            self.load_model(all_states['Transformer'])
            print('[Transformer] - Number of parameters: ' + str(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
        
        self.out_dim = self.hidden_size # e.g. 768; this attribute is needed by pytorch-kaldi and the downstream runner
        self.permute_input = True # Used by the forward method: if True the input/output is of shape (T, B, D), if False (B, T, D)


    def load_model(self, state_dict):
        try:
            old_keys = []
            new_keys = []
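            # older checkpoints store LayerNorm parameters as `gamma` / `beta`;
            # rename them to the `weight` / `bias` naming used by torch.nn.LayerNorm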
            for key in state_dict.keys():
                new_key = None
                if 'gamma' in key:
                    new_key = key.replace('gamma', 'weight')
                if 'beta' in key:
                    new_key = key.replace('beta', 'bias')
                if new_key:
                    old_keys.append(key)
                    new_keys.append(new_key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)

            missing_keys = []
            unexpected_keys = []
            error_msgs = []
            # copy state_dict so _load_from_state_dict can modify it
            metadata = getattr(state_dict, '_metadata', None)
            state_dict = state_dict.copy()
            if metadata is not None:
                state_dict._metadata = metadata

            def load(module, prefix=''):
                local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
                module._load_from_state_dict(
                    state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
                for name, child in module._modules.items():
                    if child is not None:
                        load(child, prefix + name + '.')

            load(self.model)
            if len(missing_keys) > 0:
                print('Weights of {} not initialized from pretrained model: {}'.format(
                    self.model.__class__.__name__, missing_keys))
            if len(unexpected_keys) > 0:
                print('Weights from pretrained model not used in {}: {}'.format(
                    self.model.__class__.__name__, unexpected_keys))
            if len(error_msgs) > 0:
                raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                                    self.model.__class__.__name__, '\n\t'.join(error_msgs)))
            print('[Transformer] - Pre-trained weights loaded!')

        except Exception as e:
            raise RuntimeError('[Transformer] - Pre-trained weights NOT loaded!') from e


    def down_sample_frames(self, spec):
        spec = spec.contiguous()
        left_over = spec.shape[1] % self.dr
        if left_over != 0: spec = spec[:, :-left_over, :]
        spec_stacked = spec.view(spec.shape[0], spec.shape[1]//self.dr, spec.shape[2]*self.dr)
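        # e.g. with dr = 3 and spec of shape (B, T, D) = (8, 100, 80): the trailing
        # 100 % 3 = 1 frame is dropped and spec_stacked has shape (8, 33, 240)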
        return spec_stacked
        

    def process_input_data(self, spec):
        """Process input data for the model"""
        
        # add an arbitrary batch axis B if input `spec` has shape TxD
        if len(spec.shape) == 2:
            spec = spec.unsqueeze(0)
        # input `spec` should have shape BxTxD
        elif len(spec.shape) != 3:
            raise ValueError('Input argument `spec` has invalid shape: {}'.format(spec.shape))

        # Down sample
        if self.dr > 1:
            spec_stacked = self.down_sample_frames(spec) # (batch_size, seq_len, feature_dim * dr)
        else:
            spec_stacked = spec

        # Record length for each uttr
        spec_len = np.sum(np.sum(spec_stacked.cpu().data.numpy(), axis=-1) != 0, axis=-1)
        spec_len = [int(sl) for sl in spec_len]

        batch_size = spec_stacked.shape[0]
        seq_len = spec_stacked.shape[1]

        pos_enc = position_encoding(seq_len, self.hidden_size) # (seq_len, hidden_size)
        attn_mask = np.ones((batch_size, seq_len)) # (batch_size, seq_len)

        # zero vectors for padding dimension
        for idx in range(len(spec_stacked)):
            attn_mask[idx][spec_len[idx]:] = 0 

        if self.spec_aug and self.spec_aug_prev and self.model.training:
            spec_stacked = spec_augment(spec_stacked, mask_T=70, mask_F=4, num_T=2, num_F=2, p=1.0) # (batch_size, seq_len, feature_dim * dr)
        spec_stacked = spec_stacked.to(device=self.device, dtype=torch.float32) # (batch_size, seq_len, feature_dim * dr)
        pos_enc = torch.FloatTensor(pos_enc).to(device=self.device, dtype=torch.float32).expand(spec_stacked.size(0), *pos_enc.size()) # (batch_size, seq_len, hidden_size)
        attn_mask = torch.FloatTensor(attn_mask).to(device=self.device, dtype=torch.float32) # (batch_size, seq_len)
        return spec_stacked, pos_enc, attn_mask # (x, pos_enc, attention_mask)


    def tile_representations(self, reps):
        """ 
        Tile up the speech representations to match the number of input frames.
        Input - encoded_layers shape: (batch_size, sequence_length, hidden_size)
        Output - tiled_encoded_layers shape: (batch_size, sequence_length * downsample_rate, hidden_size)
        """
        if len(reps.shape) != 3:
            raise ValueError('Input argument `reps` has invalid shape: {}'.format(reps.shape))

        tiled_reps = reps.repeat(1, 1, self.dr)
        tiled_reps = tiled_reps.reshape(reps.size(0), reps.size(1)*self.dr, reps.size(2))
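        # e.g. with dr = 3 and reps of shape (8, 33, 768): repeat gives (8, 33, 2304)
        # and the reshape unfolds it back to (8, 99, 768), i.e. each frame repeated 3 times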
        return tiled_reps # (batch_size, sequence_length * downsample_rate, hidden_size)


    def upsample(self, x, input_len):
        # Compute padding to compensate for the frames dropped during down-sampling
        left_over = input_len % self.dr
        if left_over % 2 == 0:
            left_pad = left_over // 2
            right_pad = left_pad
        else:
            left_pad = left_over // 2
            right_pad = left_over // 2 + 1
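        # e.g. input_len = 100 and dr = 3: one frame was dropped during down-sampling,
        # so after tiling we replication-pad 0 frames on the left and 1 on the right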
        
        x = self.tile_representations(x)

        # padding
        x = x.permute(0, 2, 1).contiguous() # (B, T, D) -> (B, D, T)
        padding = nn.ReplicationPad1d((left_pad, right_pad))
        x = padding(x)
        
        x = x.permute(0, 2, 1).contiguous() # (B, D, T) -> (B, T, D)
        return x


    def _forward(self, x):

        if self.permute_input:
            x = x.permute(1, 0, 2).contiguous() # (T, B, D) -> (B, T, D)
        input_len = x.shape[1]

        # forward the whole sequence at once
        if self.max_input_length == 0 or input_len <= self.max_input_length:
            spec_stacked, pos_enc, attn_mask = self.process_input_data(x) # x shape: (B, T, D)
            x = self.model(spec_stacked, pos_enc, attn_mask, output_all_encoded_layers=self.weighted_sum or self.select_layer != -1) # (B, T, D) or # (N, B, T, D)
        # forward the sequence in chunks then concat
        else:
            chunks = torch.chunk(x, chunks=math.ceil(input_len / self.max_input_length), dim=1)
            x_ = []
            for chunk in chunks:
                spec_stacked, pos_enc, attn_mask = self.process_input_data(chunk) # x shape: (B, T, D)
                chunk = self.model(spec_stacked, pos_enc, attn_mask, output_all_encoded_layers=self.weighted_sum or self.select_layer != -1) # (B, T, D) or # (N, B, T, D)
                x_.append(torch.stack(chunk) if type(chunk) is list else chunk)
            x = torch.cat(x_, dim=2 if (self.weighted_sum or self.select_layer != -1) else 1)

        # Apply weighted sum
        if self.weighted_sum:
            if type(x) is list: x = torch.stack(x)
            softmax_weight = nn.functional.softmax(self.weight, dim=-1)
            B, T, D = x.shape[1], x.shape[2], x.shape[3]
            x = x.reshape(self.num_layers, -1)
            x = torch.matmul(softmax_weight, x).reshape(B, T, D)
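            # equivalent to sum_i softmax(weight)_i * layer_i: each of the num_layers
            # outputs is flattened to a row, combined by the weights, then reshaped to (B, T, D)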
        # Select a specific layer
        elif self.select_layer != -1:
            x = x[self.select_layer]

        if self.spec_aug and not self.spec_aug_prev and self.model.training:
            x = spec_augment(x, mask_T=70, mask_F=86, num_T=2, num_F=2, p=1.0) # (B, T, D)

        # If using a downsampling model, apply tile and padding
        if self.dr > 1:
            x = self.upsample(x, input_len) # (B, T, D)
        
        # permute to output
        if self.permute_input:
            x = x.permute(1, 0, 2).contiguous() # (B, T, D) -> (T, B, D)

        return x # (B, T, D) or (T, B, D)


    def forward(self, x):
        if self.no_grad:
            with torch.no_grad():
                self.model.eval()
                x = self._forward(x)
        else:
            x = self._forward(x)
        return x
Example #12
def main(checkpoint_path, sentence, num_steps):

    pretrained_model = TransformerModel.load_from_checkpoint(checkpoint_path)
    prediction = pretrained_model.predict(sentence, num_steps)

    print(sentence, ' --> ', prediction)
Example #13
class Solver():
    ''' Base class Solver for all kinds of tasks '''
    def __init__(self, config, paras):

        # General Settings
        self.config = config
        self.paras = paras
        self.transformer_config = config['transformer']
        self.device = torch.device('cuda') if (
            self.paras.gpu
            and torch.cuda.is_available()) else torch.device('cpu')
        if torch.cuda.is_available(): self.verbose('CUDA is available!')

        # path and directories
        self.exp_name = paras.name
        if self.exp_name is None:
            self.exp_name = '_'.join([
                paras.config.split('/')[-1].replace('.yaml', ''),
                'sd' + str(paras.seed)
            ])
        self.ckpdir = paras.ckpdir
        self.expdir = os.path.join(self.ckpdir, self.exp_name)

        self.load = paras.load
        # only for test
        self.ckpt = os.path.join(self.ckpdir, paras.ckpt)

        # model
        self.load_model_list = config['solver']['load_model_list']
        self.duo_feature = config['solver']['duo_feature']
        self.output_dim = 1025 if self.duo_feature else None  # output dim is the same as input dim if not using duo features
        if 'input_dim' in self.transformer_config:
            self.input_dim = self.transformer_config['input_dim']
        else:
            raise ValueError(
                'Please update your config file to include the attribute `input_dim`.'
            )

    def verbose(self, msg, end='\n'):
        ''' Verbose function for printing information to stdout '''
        if self.paras.verbose:
            print('[SOLVER] - ', msg, end=end)

    def load_data(self, split='train'):
        ''' Load data for training / testing'''
        if split == 'train':
            self.verbose('Loading source data ' +
                         str(self.config['dataloader']['train_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
            if self.duo_feature:
                self.verbose('Loading target data ' +
                             str(self.config['dataloader']['train_set']) +
                             ' from ' +
                             self.config['dataloader']['target_path'])
        elif split == 'test':
            self.verbose('Loading testing data ' +
                         str(self.config['dataloader']['test_set']) +
                         ' from ' + self.config['dataloader']['data_path'])
        else:
            raise NotImplementedError('Invalid `split` argument!')

        if self.duo_feature:
            setattr(self, 'dataloader', get_Dataloader(split, load='duo', use_gpu=self.paras.gpu, \
                    mam_config=self.transformer_config, **self.config['dataloader'])) # run_mam is automatically performed
        else:
            setattr(self, 'dataloader', get_Dataloader(split, load='acoustic', use_gpu=self.paras.gpu, run_mam=True, \
                    mam_config=self.transformer_config, **self.config['dataloader']))

    def set_model(self,
                  inference=False,
                  with_head=False,
                  from_path=None,
                  output_attention=False):
        self.verbose('Initializing Transformer model.')

        # Build the Transformer model with the speech prediction head
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.with_head = with_head
        self.output_attention = output_attention

        if not inference or with_head:
            self.model = TransformerForMaskedAcousticModel(
                self.model_config, self.input_dim, self.output_dim,
                self.output_attention).to(self.device)
            self.transformer = self.model.Transformer
            if self.paras.multi_gpu:
                self.model = torch.nn.DataParallel(self.model)
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        if inference and not with_head:
            self.transformer = TransformerModel(
                self.model_config, self.input_dim,
                self.output_attention).to(self.device)
            if self.paras.multi_gpu:
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel() for p in self.transformer.parameters()
                    if p.requires_grad)))
            self.transformer.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
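            # BERT-style grouping: apply weight decay to all parameters except biases
            # and LayerNorm weights, which are placed in the zero-weight-decay group below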
            optimizer_grouped_parameters = [{
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.01
            }, {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.0
            }]

            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                    )

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer,
                                                    dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(
                        optimizer,
                        static_loss_scale=self.config['optimizer']
                        ['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(
                    warmup=self.warmup_proportion, t_total=self.total_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                          lr=self.learning_rate,
                                          warmup=self.warmup_proportion,
                                          t_total=self.total_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load:  # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference,
                            with_head=with_head,
                            from_path=from_path)

    def save_model(self, name='states', model_all=True, to_path=None):
        if model_all:
            all_states = {
                'SpecHead':
                self.model.SpecHead.state_dict() if not self.paras.multi_gpu
                else self.model.module.SpecHead.state_dict(),
                'Transformer':
                self.transformer.state_dict() if not self.paras.multi_gpu else
                self.transformer.module.state_dict(),
                'Optimizer':
                self.optimizer.state_dict(),
                'Global_step':
                self.global_step,
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        else:
            all_states = {
                'Transformer':
                self.transformer.state_dict() if not self.paras.multi_gpu else
                self.transformer.module.state_dict(),
                'Settings': {
                    'Config': self.config,
                    'Paras': self.paras,
                },
            }
        if to_path is None:
            new_model_path = '{}/{}-{}.ckpt'.format(self.expdir, name,
                                                    self.global_step)
        else:
            new_model_path = to_path
        torch.save(all_states, new_model_path)
        self.model_kept.append(new_model_path)

        if len(self.model_kept) >= self.max_keep:
            os.remove(self.model_kept[0])
            self.model_kept.pop(0)

    def load_model(self, inference=False, with_head=False, from_path=None):
        if from_path is not None:
            self.verbose('Load model from {}'.format(from_path))
            all_states = torch.load(from_path, map_location='cpu')
            self.load_model_list = ['Transformer']
        else:
            self.verbose('Load model from {}'.format(self.ckpt))
            all_states = torch.load(self.ckpt, map_location='cpu')

        if 'SpecHead' in self.load_model_list:
            if not inference or with_head:
                try:
                    if not self.paras.multi_gpu:
                        self.model.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    else:
                        self.model.module.SpecHead.load_state_dict(
                            all_states['SpecHead'])
                    self.verbose('[SpecHead] - Loaded')
                except:
                    self.verbose('[SpecHead - X]')

        if 'Transformer' in self.load_model_list:
            try:
                state_dict = all_states['Transformer']

                # Load from a PyTorch state_dict
                old_keys = []
                new_keys = []
                for key in state_dict.keys():
                    new_key = None
                    if 'gamma' in key:
                        new_key = key.replace('gamma', 'weight')
                    if 'beta' in key:
                        new_key = key.replace('beta', 'bias')
                    if new_key:
                        old_keys.append(key)
                        new_keys.append(new_key)
                for old_key, new_key in zip(old_keys, new_keys):
                    state_dict[new_key] = state_dict.pop(old_key)

                missing_keys = []
                unexpected_keys = []
                error_msgs = []
                # copy state_dict so _load_from_state_dict can modify it
                metadata = getattr(state_dict, '_metadata', None)
                state_dict = state_dict.copy()
                if metadata is not None:
                    state_dict._metadata = metadata

                def load(module, prefix=''):
                    local_metadata = {} if metadata is None else metadata.get(
                        prefix[:-1], {})
                    module._load_from_state_dict(state_dict, prefix,
                                                 local_metadata, True,
                                                 missing_keys, unexpected_keys,
                                                 error_msgs)
                    for name, child in module._modules.items():
                        if child is not None:
                            load(child, prefix + name + '.')

                # perform load
                if not self.paras.multi_gpu:
                    load(self.transformer)
                else:
                    load(self.transformer.module)

                if len(missing_keys) > 0:
                    self.verbose(
                        "Weights of {} not initialized from pretrained model: {}"
                        .format(self.transformer.__class__.__name__,
                                missing_keys))
                if len(unexpected_keys) > 0:
                    self.verbose(
                        "Weights from pretrained model not used in {}: {}".
                        format(self.transformer.__class__.__name__,
                               unexpected_keys))
                if len(error_msgs) > 0:
                    raise RuntimeError(
                        'Error(s) in loading state_dict for {}:\n\t{}'.format(
                            self.transformer.__class__.__name__,
                            "\n\t".join(error_msgs)))
                self.verbose('[Transformer] - Loaded')
            except:
                self.verbose('[Transformer - X]')

        if 'Optimizer' in self.load_model_list and not inference:
            try:
                self.optimizer.load_state_dict(all_states['Optimizer'])
                for state in self.optimizer.state.values():
                    for k, v in state.items():
                        if torch.is_tensor(v):
                            state[k] = v.cuda()
                self.verbose('[Optimizer] - Loaded')
            except:
                self.verbose('[Optimizer - X]')

        if 'Global_step' in self.load_model_list and not inference:
            try:
                self.global_step = all_states['Global_step']
                self.verbose('[Global_step] - Loaded')
            except:
                self.verbose('[Global_step - X]')

        self.verbose('Model loading complete!')

    def up_sample_frames(self, spec, return_first=False):
        if len(spec.shape) != 3:
            spec = spec.unsqueeze(0)
            assert (len(spec.shape) == 3
                    ), 'Input should have acoustic feature of shape BxTxD'
        # spec shape: [batch_size, sequence_length // downsample_rate, output_dim * downsample_rate]
        spec_flatten = spec.view(spec.shape[0], spec.shape[1] * self.dr,
                                 spec.shape[2] // self.dr)
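        # e.g. with dr = 3 and spec of shape (8, 33, 240): spec_flatten has shape (8, 99, 80)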
        if return_first: return spec_flatten[0]
        return spec_flatten  # spec_flatten shape: [batch_size, sequence_length * downsample_rate, output_dim // downsample_rate]

    def down_sample_frames(self, spec):
        left_over = spec.shape[1] % self.dr
        if left_over != 0: spec = spec[:, :-left_over, :]
        spec_stacked = spec.view(spec.shape[0], spec.shape[1] // self.dr,
                                 spec.shape[2] * self.dr)
        return spec_stacked

    def position_encoding(self, seq_len, batch_size=None, padding_idx=None):
        ''' Sinusoid position encoding table '''
        def cal_angle(position, hid_idx):
            return position / np.power(10000, 2 *
                                       (hid_idx // 2) / self.hidden_size)

        def get_posi_angle_vec(position):
            return [
                cal_angle(position, hid_j) for hid_j in range(self.hidden_size)
            ]

        sinusoid_table = np.array(
            [get_posi_angle_vec(pos_i) for pos_i in range(seq_len)])

        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
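        # i.e. PE(pos, 2i)   = sin(pos / 10000^(2i / hidden_size))
        #      PE(pos, 2i+1) = cos(pos / 10000^(2i / hidden_size))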

        if padding_idx is not None:
            sinusoid_table[
                padding_idx:] = 0.  # zero vector for padding dimension

        if batch_size is not None:
            batch_sinusoid_table = np.repeat(sinusoid_table[np.newaxis, ...],
                                             batch_size,
                                             axis=0)
            return batch_sinusoid_table  # (batch_size, seq_len, hidden_size)
        else:
            return sinusoid_table  # (seq_len, hidden_size)
Example #14
    def set_model(self,
                  inference=False,
                  with_head=False,
                  from_path=None,
                  output_attention=False):
        self.verbose('Initializing Transformer model.')

        # Build the Transformer model with the speech prediction head
        self.model_config = TransformerConfig(self.config)
        self.dr = self.model_config.downsample_rate
        self.hidden_size = self.model_config.hidden_size
        self.with_head = with_head
        self.output_attention = output_attention

        if not inference or with_head:
            self.model = TransformerForMaskedAcousticModel(
                self.model_config, self.input_dim, self.output_dim,
                self.output_attention).to(self.device)
            self.transformer = self.model.Transformer
            if self.paras.multi_gpu:
                self.model = torch.nn.DataParallel(self.model)
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel()
                    for p in self.model.parameters() if p.requires_grad)))

        if inference and not with_head:
            self.transformer = TransformerModel(
                self.model_config, self.input_dim,
                self.output_attention).to(self.device)
            if self.paras.multi_gpu:
                self.transformer = torch.nn.DataParallel(self.transformer)
                self.verbose('Multi-GPU training Enabled: ' +
                             str(torch.cuda.device_count()))
            self.verbose('Number of parameters: ' + str(
                sum(p.numel() for p in self.transformer.parameters()
                    if p.requires_grad)))
            self.transformer.eval()
        elif inference and with_head:
            self.model.eval()
        elif not inference:
            self.model.train()

            # Setup optimizer
            param_optimizer = list(self.model.named_parameters())

            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [{
                'params': [
                    p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.01
            }, {
                'params': [
                    p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)
                ],
                'weight_decay':
                0.0
            }]

            if self.apex:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError(
                        "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
                    )

                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=self.learning_rate,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if self.config['optimizer']['loss_scale'] == 0:
                    self.optimizer = FP16_Optimizer(optimizer,
                                                    dynamic_loss_scale=True)
                else:
                    self.optimizer = FP16_Optimizer(
                        optimizer,
                        static_loss_scale=self.config['optimizer']
                        ['loss_scale'])
                self.warmup_linear = WarmupLinearSchedule(
                    warmup=self.warmup_proportion, t_total=self.total_steps)
            else:
                self.optimizer = BertAdam(optimizer_grouped_parameters,
                                          lr=self.learning_rate,
                                          warmup=self.warmup_proportion,
                                          t_total=self.total_steps)
        else:
            raise NotImplementedError('Invalid Arguments!')

        if self.load:  # This will be set to True by default when Tester is running set_model()
            self.load_model(inference=inference,
                            with_head=with_head,
                            from_path=from_path)
Example #15
def train(load_from=None, save_to=None):
    print_file = sys.stderr
    if gconfig.printout:
        print_file = sys.stdout
    train_data_src = read_corpus(paths.train_source, source='src')
    train_data_tgt = read_corpus(paths.train_target, source='tgt')

    dev_data_src = read_corpus(paths.dev_source, source='src')
    dev_data_tgt = read_corpus(paths.dev_target, source='tgt')

    train_data = zip_data(train_data_src, train_data_tgt)
    dev_data = zip_data(dev_data_src, dev_data_tgt)

    train_batch_size = tconfig.batch_size
    valid_niter = gconfig.valid_niter
    log_every = gconfig.log_every
    if save_to is not None:
        model_save_path = save_to
    else:
        model_save_path = paths.model

    max_epoch = tconfig.max_epoch

    if gconfig.sanity:
        log_every = 1
        train_data = train_data[:150]
        dev_data = dev_data[:150]
        max_epoch = 2
    pretraining = gconfig.pretraining
    pretraining_encoder = gconfig.pretraining_encoder
    loaded = False
    if load_from is not None:
        try:
            print("Loading from", load_from)
            model = TransformerModel.load(load_from)
            loaded = True
            pretraining = False
            pretraining_encoder = False
        except FileNotFoundError:
            pass
    if not loaded:
        print("No loading file provided or found : training from scratch")
        print("Loading Transformer Model")
        model = TransformerModel()

    if gconfig.cuda:
        model.to_gpu()
    else:
        print("No cuda support")

    num_trial = 0
    train_iter = patience = cum_loss = report_loss = cumulative_tgt_words = report_tgt_words = 0
    cumulative_examples = report_examples = epoch = valid_num = 0
    hist_valid_scores = []
    train_time = begin_time = time.time()
    lr = tconfig.lr
    max_patience = tconfig.patience
    max_num_trial = tconfig.max_num_trial
    lr_decay = tconfig.lr_decay

    # if pretraining_encoder:
    #     #print("Pretraining the encoder")
    #     #pretrain.train_encoder(model, train_data, dev_data)
    #     print("Pretraining the encoder")
    #     routine.train_encoder(model, train_data, dev_data, model_save_path,
    #                           train_batch_size, valid_niter, log_every, tconfig.max_epoch_pretraining_encoder, lr, max_patience, max_num_trial, lr_decay)
    #     model.reset_optimizer()

    # if pretraining:

    #     print("Pretraining the decoder")
    #     routine.train_decoder(model, train_data, dev_data, model_save_path,
    #                           train_batch_size, valid_niter, log_every, tconfig.max_epoch_pretraining, lr, max_patience, max_num_trial, lr_decay)
    #     model.reset_optimizer()

    model = routine.train_model(model, train_data, dev_data, model_save_path,
                                train_batch_size, valid_niter, log_every,
                                max_epoch, lr, max_patience, max_num_trial,
                                lr_decay)
    model.to_cpu()