    # Assumes module-level imports: torch, torch.nn.functional as F, and load_dataset.
    def __init__(self, embedding, batch_size):
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        output_size = 10
        embedding_length = 300
        learning_rate = 2e-5

        self.model = LogisticRegressionModel(output_size, vocab_size,
                                             embedding_length, word_embeddings)

        # Update only the parameters that require gradients (e.g., frozen
        # pretrained embeddings are skipped).
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=learning_rate)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)
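
The LogisticRegressionModel class itself is not shown in this listing. A minimal hypothetical sketch of a compatible class, assuming it mean-pools frozen pretrained embeddings into a single linear layer (the actual implementation may differ):

import torch
import torch.nn as nn


class LogisticRegressionModel(nn.Module):
    """Hypothetical sketch matching the (output_size, vocab_size,
    embedding_length, word_embeddings) constructor call above."""

    def __init__(self, output_size, vocab_size, embedding_length, word_embeddings):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_length)
        # Freeze the pretrained vectors; the requires_grad filter on the
        # optimizer above then leaves them out of the update.
        self.embedding.weight = nn.Parameter(word_embeddings, requires_grad=False)
        self.fc = nn.Linear(embedding_length, output_size)

    def forward(self, x):
        embedded = self.embedding(x)   # (batch, seq_len, embedding_length)
        pooled = embedded.mean(dim=1)  # average over the sequence
        return self.fc(pooled)         # raw logits; F.cross_entropy applies log-softmax
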
Example #2
    def __init__(self, embedding, batch_size):
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        output_size = 10        # number of target classes
        hidden_size = 256       # LSTM hidden state dimension
        embedding_length = 300  # dimensionality of the pretrained word vectors

        self.model = LSTMClassifier(batch_size, output_size, hidden_size,
                                    vocab_size, embedding_length,
                                    word_embeddings)

        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=0.0001)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)
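
As with the first example, LSTMClassifier is not defined in this listing. A minimal sketch under the same assumptions (frozen embeddings, final hidden state fed to a linear head); the constructor arguments mirror the call above, though the real class may use them differently:

import torch.nn as nn


class LSTMClassifier(nn.Module):
    """Hypothetical sketch matching the constructor call above."""

    def __init__(self, batch_size, output_size, hidden_size, vocab_size,
                 embedding_length, word_embeddings):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_length)
        self.embedding.weight = nn.Parameter(word_embeddings, requires_grad=False)
        self.lstm = nn.LSTM(embedding_length, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        embedded = self.embedding(x)       # (batch, seq_len, embedding_length)
        _, (h_n, _) = self.lstm(embedded)  # h_n: (num_layers, batch, hidden_size)
        return self.fc(h_n[-1])            # classify from the final hidden state
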
Example #3
    def __init__(self, embedding, batch_size):
        TEXT, vocab_size, word_embeddings, self.train_iter, self.valid_iter, self.test_iter = load_dataset.load(
            embedding=embedding, batch_size=batch_size)
        self.embedding = embedding

        output_size = 10
        in_channel = 1              # embedded text enters the CNN as one channel
        out_channel = 16            # feature maps per kernel height
        kernel_heights = [3, 5, 7]  # convolution window sizes over tokens
        keep_probab = 0             # dropout setting; 0 disables dropout when read as a drop probability
        stride = 1
        padding = [1, 2, 3]         # (k - 1) // 2 per kernel height, preserving sequence length
        embedding_length = 300

        self.model = CNN(batch_size, output_size, in_channel, out_channel,
                         kernel_heights, stride, padding, keep_probab,
                         vocab_size, embedding_length, word_embeddings)

        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            self.model.parameters()),
                                     weight_decay=0.0005,
                                     lr=0.0001)
        loss_fn = F.cross_entropy
        self.training_handler = TrainingHandler(optimizer, loss_fn, batch_size)
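
Neither CNN nor the TrainingHandler shared by all three examples appears in this listing. Below is a hypothetical sketch of each, assuming a Kim-style text CNN (one 2D convolution per kernel height, max-pooled and concatenated) and a handler that simply bundles the optimizer and loss; both illustrate how the constructor arguments above could be consumed, not the actual implementations:

import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):
    """Hypothetical sketch matching the constructor call above."""

    def __init__(self, batch_size, output_size, in_channel, out_channel,
                 kernel_heights, stride, padding, keep_probab, vocab_size,
                 embedding_length, word_embeddings):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_length)
        self.embedding.weight = nn.Parameter(word_embeddings, requires_grad=False)
        # One convolution per kernel height, each spanning the full embedding width.
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channel, out_channel, (k, embedding_length),
                      stride=stride, padding=(p, 0))
            for k, p in zip(kernel_heights, padding)
        ])
        self.dropout = nn.Dropout(keep_probab)  # treats keep_probab as a drop probability
        self.fc = nn.Linear(len(kernel_heights) * out_channel, output_size)

    def forward(self, x):
        embedded = self.embedding(x).unsqueeze(1)  # (batch, 1, seq_len, embedding_length)
        activations = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        pooled = [F.max_pool1d(a, a.size(2)).squeeze(2) for a in activations]
        return self.fc(self.dropout(torch.cat(pooled, dim=1)))


class TrainingHandler:
    """Hypothetical sketch: bundles the optimizer, loss function, and batch
    size so each wrapper above can hand off training the same way."""

    def __init__(self, optimizer, loss_fn, batch_size):
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.batch_size = batch_size

    def step(self, model, text, labels):
        # One standard supervised update: forward, loss, backward, step.
        self.optimizer.zero_grad()
        loss = self.loss_fn(model(text), labels)
        loss.backward()
        self.optimizer.step()
        return loss.item()
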