Code Example #1
File: TRADE.py  Project: stanford-oval/trade-dst
class BERTEncoder(nn.Module):
    def __init__(self, hidden_size, dropout, device):
        super(BERTEncoder, self).__init__()

        self.device = device
        # Load config and pre-trained model
        pre_trained_model = BertModel.from_pretrained(
            args['bert_model'],
            cache_dir=PYTORCH_PRETRAINED_BERT_CACHE /
            'distributed_{}'.format(-1))
        bert_config = pre_trained_model.config

        # modify config if you want
        bert_config.num_hidden_layers = args['num_bert_layers']

        self.bert = BertModel(bert_config)

        # load desired layers from pre-trained model
        self.bert.load_state_dict(pre_trained_model.state_dict(), strict=False)

        self.proj = nn.Linear(bert_config.hidden_size, hidden_size)

        self.dropout = dropout
        # Note: dropout_layer is defined here but not applied in forward.
        self.dropout_layer = nn.Dropout(dropout)

    def forward(self, all_input_ids, all_input_mask, all_segment_ids,
                all_sub_word_masks):
        # Note: all_sub_word_masks is accepted for interface compatibility
        # but is not used in this forward pass.
        sequence_output, pooled_output = self.bert(
            all_input_ids,
            attention_mask=all_input_mask,
            token_type_ids=all_segment_ids)

        output = self.proj(sequence_output)
        hidden = self.proj(pooled_output)

        return output, hidden
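
A minimal usage sketch for this encoder (an assumption-laden illustration: it presumes the TRADE globals `args['bert_model']` and PYTORCH_PRETRAINED_BERT_CACHE are configured as the class above expects, and hidden_size=400 is an arbitrary example value):

import torch

encoder = BERTEncoder(hidden_size=400, dropout=0.1, device='cpu')

batch_size, seq_len = 2, 16
input_ids = torch.randint(0, 30522, (batch_size, seq_len))          # ids in BERT's vocab
input_mask = torch.ones(batch_size, seq_len, dtype=torch.long)      # attention mask
segment_ids = torch.zeros(batch_size, seq_len, dtype=torch.long)    # single segment
sub_word_masks = torch.ones(batch_size, seq_len, dtype=torch.long)  # unused by forward

output, hidden = encoder(input_ids, input_mask, segment_ids, sub_word_masks)
# Expected shapes, assuming self.bert returns the final hidden-state tensor:
# output: (batch_size, seq_len, hidden_size); hidden: (batch_size, hidden_size)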
Code Example #2
class BertForMultiTaskClassification(BertPreTrainedModel):
    """
    PyTorch BERT class for multitask learning. This model allows you to load
    in some pretrained tasks in addition to creating new ones.

    Examples
    --------
    To instantiate a completely new instance of BertForMultiTaskClassification
    and load the weights into this architecture you can use the `from_pretrained`
    method of the base class by specifying the name of the weights to load, e.g.::

        model = BertForMultiTaskClassification.from_pretrained(
            'bert-base-uncased',
            new_task_dict=new_task_dict
        )

        # DO SOME TRAINING

        model.save(SOME_FOLDER, SOME_MODEL_ID)

    To instantiate an instance of BertForMultiTaskClassification that has layers for
    pretrained tasks and new tasks, you would do the following::

        model = BertForMultiTaskClassification.from_pretrained(
            'bert-base-uncased',
            pretrained_task_dict=pretrained_task_dict,
            new_task_dict=new_task_dict
        )

        model.load(SOME_FOLDER, SOME_MODEL_ID)

        # DO SOME TRAINING

    Parameters
    ----------
    config: BertConfig
        Defines the BERT model architecture.
        Note: you will most likely be instantiating the class with the `from_pretrained` method
        so you don't need to come up with your own config.
    pretrained_task_dict: dict
        dictionary mapping each pretrained task to the number of labels it has
    new_task_dict: dict
        dictionary mapping each new task to the number of labels it has
    dropout: float
        dropout probability for the Dropout layer
    """
    def __init__(self,
                 config,
                 pretrained_task_dict=None,
                 new_task_dict=None,
                 dropout=1e-1):
        super(BertForMultiTaskClassification, self).__init__(config)
        self.bert = BertModel(config)

        self.dropout = torch.nn.Dropout(dropout)

        if pretrained_task_dict is not None:
            pretrained_layers = {}
            for key, task_size in pretrained_task_dict.items():
                pretrained_layers[key] = nn.Linear(config.hidden_size,
                                                   task_size)
            self.pretrained_classifiers = nn.ModuleDict(pretrained_layers)
        if new_task_dict is not None:
            new_layers = {}
            for key, task_size in new_task_dict.items():
                new_layers[key] = nn.Linear(config.hidden_size, task_size)
            self.new_classifiers = nn.ModuleDict(new_layers)

    def forward(self, tokenized_input):
        """
        Defines forward pass for Bert model

        Parameters
        ----------
        tokenized_input: torch tensor of integers
            integer token ids for each word

        Returns
        -------
        A dictionary mapping each task to its logits
        """
        outputs = self.bert(tokenized_input)

        pooled_output = self.dropout(outputs[1])

        logit_dict = {}
        if hasattr(self, 'pretrained_classifiers'):
            for key, classifier in self.pretrained_classifiers.items():
                logit_dict[key] = classifier(pooled_output)
        if hasattr(self, 'new_classifiers'):
            for key, classifier in self.new_classifiers.items():
                logit_dict[key] = classifier(pooled_output)

        return logit_dict

    def freeze_bert(self):
        """Freeze all core Bert layers"""
        for param in self.bert.parameters():
            param.requires_grad = False

    def freeze_pretrained_classifiers_and_bert(self):
        """Freeze pretrained classifier layers and core Bert layers"""
        self.freeze_bert()
        if hasattr(self, 'pretrained_classifiers'):
            for param in self.pretrained_classifiers.parameters():
                param.requires_grad = False
        else:
            print('There are no pretrained_classifier layers to be frozen.')

    def unfreeze_pretrained_classifiers(self):
        """Unfreeze pretrained classifier layers"""
        if hasattr(self, 'pretrained_classifiers'):
            for param in self.pretrained_classifiers.parameters():
                param.requires_grad = True
        else:
            print('There are no pretrained_classifier layers to be unfrozen.')

    def unfreeze_pretrained_classifiers_and_bert(self):
        """Unfreeze pretrained classifiers and core Bert layers"""
        for param in self.bert.parameters():
            param.requires_grad = True

        self.unfreeze_pretrained_classifiers()

    def save(self, folder, model_id):
        """
        Saves the model state dicts to a specific folder.
        Each part of the model is saved separately to allow for
        new classifiers to be added later.

        Note: if the model has `pretrained_classifiers` and `new_classifiers`,
        they will be combined into the `pretrained_classifiers_dict`.

        Parameters
        ----------
        folder: str or Path
            place to store state dictionaries
        model_id: int
            unique id for this model

        Side Effects
        ------------
        saves three files:
            - folder / f'bert_dict_{model_id}.pth'
            - folder / f'dropout_dict_{model_id}.pth'
            - folder / f'pretrained_classifiers_dict_{model_id}.pth'
        """
        if hasattr(self, 'pretrained_classifiers'):
            # PyTorch's update method isn't working because it doesn't think ModuleDict is a Mapping
            classifiers_to_save = copy.deepcopy(self.pretrained_classifiers)
            if hasattr(self, 'new_classifiers'):
                for key, module in self.new_classifiers.items():
                    classifiers_to_save[key] = module
        else:
            classifiers_to_save = copy.deepcopy(self.new_classifiers)

        folder = Path(folder)
        folder.mkdir(parents=True, exist_ok=True)

        torch.save(self.bert.state_dict(),
                   folder / f'bert_dict_{model_id}.pth')
        torch.save(self.dropout.state_dict(),
                   folder / f'dropout_dict_{model_id}.pth')

        torch.save(classifiers_to_save.state_dict(),
                   folder / f'pretrained_classifiers_dict_{model_id}.pth')

    def load(self, folder, model_id):
        """
        Loads the model state dicts from a specific folder.

        Parameters
        ----------
        folder: str or Path
            place where state dictionaries are stored
        model_id: int
            unique id for this model

        Side Effects
        ------------
        loads from three files:
            - folder / f'bert_dict_{model_id}.pth'
            - folder / f'dropout_dict_{model_id}.pth'
            - folder / f'pretrained_classifiers_dict_{model_id}.pth'
        """
        folder = Path(folder)

        if torch.cuda.is_available():
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{model_id}.pth'))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{model_id}.pth'))
            self.pretrained_classifiers.load_state_dict(
                torch.load(folder /
                           f'pretrained_classifiers_dict_{model_id}.pth'))
        else:
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))
            self.pretrained_classifiers.load_state_dict(
                torch.load(folder /
                           f'pretrained_classifiers_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))

    def export(self, folder, model_id, model_name=None):
        """
        Exports the entire model state dict to a specific folder.

        Note: if the model has `pretrained_classifiers` and `new_classifiers`,
        they will be combined into the `pretrained_classifiers` attribute before being saved.

        Parameters
        ----------
        folder: str or Path
            place to store state dictionaries
        model_id: int
            unique id for this model
        model_name: str (defaults to None)
            Name to store the model under; if None, defaults to `multi_task_bert_{model_id}.pth`

        Side Effects
        ------------
        saves one file:
            - folder / model_name
        """
        hold_new_classifiers = copy.deepcopy(self.new_classifiers)
        hold_pretrained_classifiers = None
        if not hasattr(self, 'pretrained_classifiers'):
            self.pretrained_classifiers = copy.deepcopy(self.new_classifiers)
        else:
            hold_pretrained_classifiers = copy.deepcopy(
                self.pretrained_classifiers)
            # PyTorch's update method isn't working because it doesn't think ModuleDict is a Mapping
            for key, module in self.new_classifiers.items():
                self.pretrained_classifiers[key] = module

        del self.new_classifiers

        if model_name is None:
            model_name = f'multi_task_bert_{model_id}.pth'

        folder = Path(folder)
        folder.mkdir(parents=True, exist_ok=True)

        torch.save(self.state_dict(), folder / model_name)
        if hold_pretrained_classifiers is not None:
            self.pretrained_classifiers = hold_pretrained_classifiers
        else:
            del self.pretrained_classifiers
        self.new_classifiers = hold_new_classifiers

    def import_model(self, folder, file):
        """
        Imports the entire model state dict from a specific folder.

        Note: to re-export a model loaded with this method,
        use the `export` method.

        Parameters
        ----------
        folder: str or Path
            place where the exported model is stored
        file: str
            filename for the exported model object
        """
        folder = Path(folder)
        self.load_state_dict(torch.load(folder / file))
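
As a hedged sketch of how the returned logit dictionary could drive training (the task names, label counts, and batch tensors below are illustrative assumptions, not from the source):

import torch
import torch.nn as nn

# Hypothetical tasks: names and label counts are for illustration only.
new_task_dict = {'color': 5, 'season': 4}

model = BertForMultiTaskClassification.from_pretrained(
    'bert-base-uncased',
    new_task_dict=new_task_dict
)

tokenized_input = torch.randint(0, 30522, (8, 32))  # (batch, seq_len) token ids
labels = {task: torch.randint(0, n, (8,)) for task, n in new_task_dict.items()}

# Sum per-task cross-entropy losses into one multitask objective
loss_fn = nn.CrossEntropyLoss()
logit_dict = model(tokenized_input)
loss = sum(loss_fn(logit_dict[task], labels[task]) for task in new_task_dict)
loss.backward()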
Code Example #3
class BertResnetEnsembleForMultiTaskClassification(nn.Module):
    """
    PyTorch ensemble class for multitask learning, consisting of text and image models

    This model is made up of multiple component models:
    - for text: Google's BERT model
    - for images: multiple ResNet50s (the exact number depends on how
    the image model tasks were split up)

    You may need to train the component image and text models first
    before combining them into an ensemble model to get good results.

    Note: For explicitness, `vanilla` refers to the
    `transformers` BERT or `PyTorch` ResNet50 weights while
    `pretrained` refers to previously trained Tonks weights.

    Examples
    --------
    The ensemble model should be used with pretrained
    BERT and ResNet50 component models.
    To initialize a model in this way::

        image_task_dict = {
            'color_pattern': {
                'color': color_train_df['labels'].nunique(),
                'pattern': pattern_train_df['labels'].nunique()
            },
            'dress_sleeve': {
                'dress_length': dl_train_df['labels'].nunique(),
                'sleeve_length': sl_train_df['labels'].nunique()
            },
            'season': {
                'season': season_train_df['labels'].nunique()
            }
        }
        model = BertResnetEnsembleForMultiTaskClassification(
            image_task_dict=image_task_dict
        )

        resnet_model_id_dict = {
            'color_pattern': 'SOME_RESNET_MODEL_ID1',
            'dress_sleeve': 'SOME_RESNET_MODEL_ID2',
            'season': 'SOME_RESNET_MODEL_ID3'
        }

        model.load_core_models(
            folder='SOME_FOLDER',
            bert_model_id='SOME_BERT_MODEL_ID',
            resnet_model_id_dict=resnet_model_id_dict
        )

        # DO SOME TRAINING

        model.save(SOME_FOLDER, SOME_MODEL_ID)

        # OR

        model.export(SOME_FOLDER, SOME_MODEL_ID)

    Parameters
    ----------
    image_task_dict: dict
        dictionary mapping each pretrained ResNet50 model to a dictionary
        of the tasks it was trained on
    dropout: float
        dropout probability for the Dropout layer
    """
    def __init__(self, image_task_dict=None, dropout=1e-1):
        super(BertResnetEnsembleForMultiTaskClassification, self).__init__()

        # Define text architecture
        config = BertConfig()
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(dropout)

        self.image_task_dict = image_task_dict
        self.text_task_dict = self.create_text_dict(image_task_dict)

        # Define image architecture
        image_resnets = {}
        image_dense_layers = {}
        ensemble_layers = {}
        for key in self.image_task_dict.keys():
            resnet = torch_models.resnet50(pretrained=False)
            resnet.fc = _Identity()
            image_resnets[key] = resnet
            image_dense_layers[key] = nn.Sequential(
                _dense_block(2048 * 2, 1024, 2e-3),
                _dense_block(1024, 512, 2e-3), _dense_block(512, 256, 2e-3))

            # Define final ensemble before classifier layers
            # The input is size 768 from BERT and 256 from ResNet50 models
            # so the total size is 1024
            ensemble_layers[key] = nn.Sequential(
                _dense_block(1024, 512, 2e-3),
                _dense_block(512, 512, 2e-3),
                _dense_block(512, 256, 2e-3),
            )

        self.image_resnets = nn.ModuleDict(image_resnets)
        self.image_dense_layers = nn.ModuleDict(image_dense_layers)
        self.ensemble_layers = nn.ModuleDict(ensemble_layers)

        pretrained_layers = {}
        for key, task_size in self.text_task_dict.items():
            pretrained_layers[key] = nn.Linear(256, task_size)
        self.classifiers = nn.ModuleDict(pretrained_layers)

    def forward(self, x):
        """
        Defines forward pass for ensemble model

        Parameters
        ----------
        x: dict
            dictionary of torch tensors with keys:
                - `bert_text`: integers mapping to BERT vocabulary
                - `full_img`: tensor of full image
                - `crop_img`: tensor of cropped image

        Returns
        -------
        A dictionary mapping each task to its logits
        """
        bert_output = self.bert(x['bert_text'])

        pooled_output = self.dropout(bert_output[1])

        logit_dict = {}

        for key in self.image_task_dict.keys():
            full_img = self.image_resnets[key](x['full_img'])
            crop_img = self.image_resnets[key](x['crop_img'])
            full_crop_combined = torch.cat((full_img, crop_img), 1)
            dense_layer_output = self.image_dense_layers[key](
                full_crop_combined)
            ensemble_input = torch.cat((pooled_output, dense_layer_output), 1)
            ensemble_layer_output = self.ensemble_layers[key](ensemble_input)

            for task in self.image_task_dict[key].keys():
                classifier = self.classifiers[task]
                logit_dict[task] = classifier(ensemble_layer_output)

        return logit_dict

    def freeze_bert(self):
        """Freeze all core BERT layers"""
        for param in self.bert.parameters():
            param.requires_grad = False

    def freeze_resnets(self):
        """Freeze all core ResNet models layers"""
        for key in self.image_resnets.keys():
            for param in self.image_resnets[key].parameters():
                param.requires_grad = False
            for param in self.image_dense_layers[key].parameters():
                param.requires_grad = False

    def freeze_ensemble_layers(self):
        """Freeze all final ensemble layers"""
        for key in self.ensemble_layers.keys():
            for param in self.ensemble_layers[key].parameters():
                param.requires_grad = False

    def freeze_classifiers_and_core(self):
        """Freeze pretrained classifier layers and core BERT/ResNet layers"""
        self.freeze_bert()
        self.freeze_resnets()
        self.freeze_ensemble_layers()
        for param in self.classifiers.parameters():
            param.requires_grad = False

    def unfreeze_classifiers(self):
        """Unfreeze pretrained classifier layers"""
        for param in self.classifiers.parameters():
            param.requires_grad = True

    def unfreeze_classifiers_and_core(self):
        """Unfreeze pretrained classifiers and core BERT/ResNet layers"""
        for param in self.bert.parameters():
            param.requires_grad = True
        for key in self.image_resnets.keys():
            for param in self.image_resnets[key].parameters():
                param.requires_grad = True
            for param in self.image_dense_layers[key].parameters():
                param.requires_grad = True
            for param in self.ensemble_layers[key].parameters():
                param.requires_grad = True

        self.unfreeze_classifiers()

    def save(self, folder, model_id):
        """
        Saves the model state dicts to a specific folder.
        Each part of the model is saved separately,
        along with the image_task_dict, which is needed to reinstantiate the model.

        Parameters
        ----------
        folder: str or Path
            place to store state dictionaries
        model_id: int
            unique id for this model

        Side Effects
        ------------
        saves seven files:
            - folder / f'bert_dict_{model_id}.pth'
            - folder / f'dropout_dict_{model_id}.pth'
            - folder / f'image_resnets_dict_{model_id}.pth'
            - folder / f'image_dense_layers_dict_{model_id}.pth'
            - folder / f'ensemble_layers_dict_{model_id}.pth'
            - folder / f'classifiers_dict_{model_id}.pth'
            - folder / f'image_task_dict_{model_id}.pickle'
        """
        folder = Path(folder)
        folder.mkdir(parents=True, exist_ok=True)

        # BERT model
        torch.save(self.bert.state_dict(),
                   folder / f'bert_dict_{model_id}.pth')
        torch.save(self.dropout.state_dict(),
                   folder / f'dropout_dict_{model_id}.pth')

        # ResNet model(s)
        torch.save(self.image_resnets.state_dict(),
                   folder / f'image_resnets_dict_{model_id}.pth')
        torch.save(self.image_dense_layers.state_dict(),
                   folder / f'image_dense_layers_dict_{model_id}.pth')

        # Ensemble layers
        torch.save(self.ensemble_layers.state_dict(),
                   folder / f'ensemble_layers_dict_{model_id}.pth')

        # Classifier layers
        torch.save(self.classifiers.state_dict(),
                   folder / f'classifiers_dict_{model_id}.pth')

        # image_task_dict
        joblib.dump(self.image_task_dict,
                    folder / f'image_task_dict_{model_id}.pickle')

    def load(self, folder, model_id):
        """
        Loads the model state dicts for ensemble model
        from a specific folder. This will load all the model
        components including the final ensemble and existing
        pretrained `classifiers`.

        Parameters
        ----------
        folder: str or Path
            place where state dictionaries are stored
        model_id: int
            unique id for this model

        Side Effects
        ------------
        loads from six files:
            - folder / f'bert_dict_{model_id}.pth'
            - folder / f'dropout_dict_{model_id}.pth'
            - folder / f'image_resnets_dict_{model_id}.pth'
            - folder / f'image_dense_layers_dict_{model_id}.pth'
            - folder / f'ensemble_layers_dict_{model_id}.pth'
            - folder / f'classifiers_dict_{model_id}.pth'

        """
        folder = Path(folder)

        if torch.cuda.is_available():
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{model_id}.pth'))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{model_id}.pth'))

            self.image_resnets.load_state_dict(
                torch.load(folder / f'image_resnets_dict_{model_id}.pth'))
            self.image_dense_layers.load_state_dict(
                torch.load(folder / f'image_dense_layers_dict_{model_id}.pth'))

            self.ensemble_layers.load_state_dict(
                torch.load(folder / f'ensemble_layers_dict_{model_id}.pth'))
            self.classifiers.load_state_dict(
                torch.load(folder / f'classifiers_dict_{model_id}.pth'))
        else:
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))

            self.image_resnets.load_state_dict(
                torch.load(folder / f'image_resnets_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))
            self.image_dense_layers.load_state_dict(
                torch.load(folder / f'image_dense_layers_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))

            self.ensemble_layers.load_state_dict(
                torch.load(folder / f'ensemble_layers_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))

            self.classifiers.load_state_dict(
                torch.load(folder / f'classifiers_dict_{model_id}.pth',
                           map_location=lambda storage, loc: storage))

    def load_core_models(self, folder, bert_model_id, resnet_model_id_dict):
        """
        Loads the weights from pretrained BERT and ResNet50 Tonks models

        Does not load weights from the final ensemble and classifier layers.
        The use case is loading pretrained component BERT and image model
        weights into a new ensemble model.

        Parameters
        ----------
        folder: str or Path
            place where state dictionaries are stored
        bert_model_id: int
            unique id for pretrained BERT text model
        resnet_model_id_dict: dict
            dictionary mapping each image model key to the unique id of its
            pretrained ResNet50 model

        Side Effects
        ------------
        loads from two BERT files, plus two files per entry
        in resnet_model_id_dict:
            - folder / f'bert_dict_{bert_model_id}.pth'
            - folder / f'dropout_dict_{bert_model_id}.pth'
            - folder / f'resnet_dict_{model_id}.pth'
            - folder / f'dense_layers_dict_{model_id}.pth'
        """
        folder = Path(folder)

        if torch.cuda.is_available():
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{bert_model_id}.pth'))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{bert_model_id}.pth'))

            for key, model_id in resnet_model_id_dict.items():
                self.image_resnets[key].load_state_dict(
                    torch.load(folder / f'resnet_dict_{model_id}.pth'))
                self.image_dense_layers[key].load_state_dict(
                    torch.load(folder / f'dense_layers_dict_{model_id}.pth'))

        else:
            self.bert.load_state_dict(
                torch.load(folder / f'bert_dict_{bert_model_id}.pth',
                           map_location=lambda storage, loc: storage))
            self.dropout.load_state_dict(
                torch.load(folder / f'dropout_dict_{bert_model_id}.pth',
                           map_location=lambda storage, loc: storage))

            for key, model_id in resnet_model_id_dict.items():
                self.image_resnets[key].load_state_dict(
                    torch.load(folder / f'resnet_dict_{model_id}.pth',
                               map_location=lambda storage, loc: storage))
                self.image_dense_layers[key].load_state_dict(
                    torch.load(folder / f'dense_layers_dict_{model_id}.pth',
                               map_location=lambda storage, loc: storage))

    def export(self, folder, model_id, model_name=None):
        """
        Exports the entire model state dict to a specific folder,
        along with the image_task_dict, which is needed to reinstantiate the model.

        Parameters
        ----------
        folder: str or Path
            place to store state dictionaries
        model_id: int
            unique id for this model
        model_name: str (defaults to None)
            Name to store the model under; if None, defaults to `multi_task_ensemble_{model_id}.pth`

        Side Effects
        ------------
        saves two files:
            - folder / f'multi_task_ensemble_{model_id}.pth'
            - folder / f'image_task_dict_{model_id}.pickle'
        """
        folder = Path(folder)
        folder.mkdir(parents=True, exist_ok=True)

        if model_name is None:
            model_name = f'multi_task_ensemble_{model_id}.pth'

        torch.save(self.state_dict(), folder / model_name)

        joblib.dump(self.image_task_dict,
                    folder / f'image_task_dict_{model_id}.pickle')

    @staticmethod
    def create_text_dict(image_task_dict):
        """Create a task dict for the text model from the image task dict"""
        text_task_dict = {}
        for joint_task in image_task_dict.keys():
            for task, task_size in image_task_dict[joint_task].items():
                if task in text_task_dict.keys():
                    raise TonksError(
                        'Task {} is in multiple models. Each task can only be in one image model.'
                        .format(task))
                text_task_dict[task] = task_size

        return text_task_dict
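
To make the expected input format concrete, here is a minimal forward-pass sketch with dummy tensors (the task dict, batch size, sequence length, and image size are assumptions for illustration; the weights are untrained, so the logits are only a shape check):

import torch

# Hypothetical single-ResNet task dict; names and label counts are illustrative.
image_task_dict = {'color_pattern': {'color': 5, 'pattern': 3}}
model = BertResnetEnsembleForMultiTaskClassification(image_task_dict=image_task_dict)

x = {
    'bert_text': torch.randint(0, 30522, (2, 32)),  # (batch, seq_len) token ids
    'full_img': torch.randn(2, 3, 224, 224),        # standard ResNet50 input size
    'crop_img': torch.randn(2, 3, 224, 224),
}

logit_dict = model(x)
print({task: logits.shape for task, logits in logit_dict.items()})
# e.g. {'color': torch.Size([2, 5]), 'pattern': torch.Size([2, 3])}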