    def __init__(self, n_classes, model_config):
        super().__init__()
        self.device = model_config.device

        # Initialize the pretrained token embedder
        self.embedder = initialize_embeddings(
            model_config.embedding_type,
            model_config.device,
            fine_tune_embeddings=model_config.fine_tune_embeddings)
        self.embedder = self.embedder.to(self.device)

        # First bidirectional GRU over the 768-dimensional token embeddings (BERT-base size)
        self.gru1 = nn.GRU(768,
                           model_config.gru_hidden_size,
                           num_layers=model_config.num_gru_layers,
                           bidirectional=True)
        # Second bidirectional GRU over the first GRU's (2 * gru_hidden_size)-dim outputs
        self.gru = nn.GRU(2 * model_config.gru_hidden_size,
                          model_config.gru_hidden_size,
                          num_layers=model_config.num_gru_layers,
                          bidirectional=True)
        # Classification head: linear -> ReLU -> dropout -> linear
        self.classifier = nn.Sequential(
            nn.Linear(2 * model_config.gru_hidden_size,
                      model_config.linear_hidden_size),
            nn.ReLU(),
            nn.Dropout(p=model_config.dropout),
            nn.Linear(model_config.linear_hidden_size, n_classes),
        )
        # Xavier-initialize the classifier's linear layers
        for layer in self.classifier:
            if isinstance(layer, nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)
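
The example above only shows __init__. As a rough sketch of how a matching forward pass might look (the forward method, the assumed (seq_len, batch, 768) input shape, and the last-time-step pooling are assumptions, not part of the source):

    def forward(self, embedded_tokens):
        # embedded_tokens: (seq_len, batch, 768) token embeddings from self.embedder (assumed shape)
        gru1_out, _ = self.gru1(embedded_tokens)  # (seq_len, batch, 2 * gru_hidden_size)
        gru_out, _ = self.gru(gru1_out)           # (seq_len, batch, 2 * gru_hidden_size)
        return self.classifier(gru_out[-1])       # logits of shape (batch, n_classes)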
Example #2
    def __init__(self, n_classes, model_config):
        super().__init__()
        self.embedder = initialize_embeddings(
            model_config.embedding_type,
            model_config.device,
            fine_tune_embeddings=model_config.fine_tune_embeddings)
        # Total width of the concatenated (stacked) embeddings
        emb_sum_sizes = sum(
            [e.embedding_length for e in self.embedder.embeddings])
        # Learnable per-dimension weights applied to the stacked embeddings
        self.emb_weights = torch.nn.Parameter(
            torch.ones([emb_sum_sizes], requires_grad=True))
        self.lstm = nn.LSTM(emb_sum_sizes,
                            model_config.lstm_hidden_size,
                            num_layers=model_config.num_lstm_layers,
                            bidirectional=True,
                            dropout=model_config.dropout)
        self.lin = nn.Linear(2 * model_config.lstm_hidden_size, n_classes)
        self.dropout = nn.Dropout(model_config.dropout)
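
Again only __init__ is shown in the source; a minimal sketch of a forward pass under the assumption that the stacked embeddings arrive as a (seq_len, batch, emb_sum_sizes) tensor (the dropout placement and last-step pooling are likewise assumptions):

    def forward(self, embedded_tokens):
        # embedded_tokens: (seq_len, batch, emb_sum_sizes) stacked embeddings (assumed shape)
        weighted = embedded_tokens * self.emb_weights    # learnable per-dimension scaling
        lstm_out, _ = self.lstm(self.dropout(weighted))  # (seq_len, batch, 2 * lstm_hidden_size)
        return self.lin(lstm_out[-1])                    # logits of shape (batch, n_classes)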
Example #3
    def __init__(self, n_classes, model_config):
        super().__init__()
        self.device = model_config.device

        # Initialize embedder for use by training loop
        self.embedder = initialize_embeddings(
            model_config.embedding_type,
            model_config.device,
            fine_tune_embeddings=model_config.fine_tune_embeddings)
        self.embedder = self.embedder.to(self.device)

        self.num_grus = model_config.num_grus
        assert 12 % self.num_grus == 0  # BERT-base exposes 12 hidden layers to distribute
        # Number of BERT hidden layers combined per GRU
        self.num_combined_per_gru = 12 // self.num_grus

        # Initialize combining GRUs (first layer)
        # Use nn.ModuleList so the sub-GRUs are registered as submodules
        # (otherwise their parameters are not trained or moved to the device with the model)
        self.grus = nn.ModuleList([
            nn.GRU(self.num_combined_per_gru * 768,
                   model_config.gru_hidden_size,
                   num_layers=model_config.num_gru_layers,
                   bidirectional=True) for _ in range(self.num_grus)
        ])

        # Initialize combining GRU (second layer)
        self.gru = nn.GRU(2 * self.num_grus * model_config.gru_hidden_size,
                          model_config.gru_hidden_size,
                          num_layers=model_config.num_gru_layers,
                          bidirectional=True)

        # Initialize classifier (after document embedding is complete using two layers of GRUs)
        self.classifier = nn.Sequential(
            nn.Linear(2 * model_config.gru_hidden_size,
                      model_config.linear_hidden_size),
            nn.ReLU(),
            nn.Dropout(p=model_config.dropout),
            nn.Linear(model_config.linear_hidden_size, n_classes),
        )

        # Xavier-initialize the classifier's linear layers
        for layer in self.classifier:
            if isinstance(layer, nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)
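
As with the examples above, the source only shows __init__; a rough sketch of a forward pass, assuming the 12 BERT hidden layers arrive as a list of (seq_len, batch, 768) tensors (the method and the pooling choice are assumptions):

    def forward(self, hidden_states):
        # hidden_states: list of 12 tensors of shape (seq_len, batch, 768), one per BERT layer (assumed)
        first_level = []
        for i, gru in enumerate(self.grus):
            start = i * self.num_combined_per_gru
            chunk = torch.cat(hidden_states[start:start + self.num_combined_per_gru], dim=-1)
            out, _ = gru(chunk)  # (seq_len, batch, 2 * gru_hidden_size)
            first_level.append(out)
        combined, _ = self.gru(torch.cat(first_level, dim=-1))
        return self.classifier(combined[-1])  # logits of shape (batch, n_classes)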
Example #4
File: app.py  Project: p-severin/ml_repo
def get_mappings(tokens):
    # Build forward (word -> id) and reverse (id -> word) vocabulary mappings
    word_to_id = dict()
    id_to_word = dict()
    unique_words = get_unique_words(tokens)
    for i, word in enumerate(unique_words):
        word_to_id[word] = i
        id_to_word[i] = word

    return word_to_id, id_to_word


if __name__ == '__main__':
    tokens = generate_tokens(texts[:2])
    unique_words = get_unique_words(tokens)
    nb_tokens = len(unique_words)
    word_to_id, id_to_word = get_mappings(tokens)

    X, y = generate_training_data(tokens, word_to_id, window_size)
    # One-hot encode the target word ids over the vocabulary
    y_one_hot = numpy.zeros((len(X), nb_tokens))
    y_one_hot[numpy.arange(len(X)), y] = 1

    embeddings = initialize_embeddings(nb_tokens, embedding_size)
    print(embeddings)

    # counter = Counter(tokens)
    # print(X)
    # vectorizer = CountVectorizer(min_df=10, stop_words=stop_words.ENGLISH_STOP_WORDS)
    # transformed = vectorizer.fit_transform(tokens)
    # vocabulary = vectorizer.vocabulary_.keys()
    # print(len(tokens))
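
The helpers used above (generate_tokens, get_unique_words, generate_training_data, initialize_embeddings) and the globals texts, window_size and embedding_size are defined elsewhere in app.py. As an illustration only, initialize_embeddings in this word2vec-style setup presumably builds a randomly initialized (nb_tokens, embedding_size) matrix, roughly along these lines (hypothetical sketch, not the project's actual code):

def initialize_embeddings(nb_tokens, embedding_size):
    # Hypothetical: one randomly initialized embedding row per vocabulary word
    return numpy.random.uniform(-1.0, 1.0, size=(nb_tokens, embedding_size))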