Example #1
import torch.nn as nn

# `dataset`, `PositionalEncoding`, `Layer` and `AttentionMultipleHead`
# are defined elsewhere in the surrounding project.


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        size = 512
        # Load the dataset so the source/target vocabularies are available.
        dataset.bootstrap()
        # Token embedding sized to the source vocabulary.
        self.embedding = nn.Embedding(
            num_embeddings=len(dataset.voc_src.index2word),
            embedding_dim=size)
        # Add positional information on top of the token embeddings.
        self.positional_embedding = PositionalEncoding(
            self.embedding.embedding_dim, 0.1, 5000)

        # Eight identical layers, each built around 6-head attention.
        self.stacked_layers = nn.Sequential(
            *[Layer(AttentionMultipleHead(6, size)) for _ in range(8)])
        # Project hidden states onto the target vocabulary.
        self.output_projection = nn.Linear(
            size, len(dataset.voc_tgt.index2word))
Example #2

import logging

import torch

# `my_dataset`, `constants`, `pytorch_utils`, `Model` and `ModelTraining`
# are defined elsewhere in the surrounding project.

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Experiment bookkeeping and training hyper-parameters.
    EXP_ID = '1.4'
    ROOT_DIR = '/source/main/train/output/'
    BATCH_SIZE = 32
    NUM_EPOCHS = 1000
    NUM_WORKERS = 0
    PRINT_EVERY = 100
    PREDICT_EVERY = 5000
    EVAL_EVERY = 10000
    PRE_TRAINED_MODEL = ''  # empty: no pre-trained checkpoint is loaded

    # Build the train/eval data loaders from the bootstrapped dataset.
    my_dataset.bootstrap()
    train_loader, eval_loader = my_dataset.get_datasets_2(BATCH_SIZE)

    # Run on the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Model(Model.get_word_embedding(),
                  d_model=constants.d_model,
                  num_layers=constants.num_layers,
                  num_heads=constants.num_heads,
                  rate=constants.rate,
                  bos_id=2,  # beginning-of-sequence token id
                  eos_id=3)  # end-of-sequence token id
    model.to(device)
    logging.info('Total trainable parameters: %s',
                 pytorch_utils.count_parameters(model))
    model_training = ModelTraining(model)
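`pytorch_utils.count_parameters` is a project helper that is not shown. A common way to implement such a count, given here as an assumption about what the helper computes:

import torch.nn as nn


def count_parameters(model: nn.Module) -> int:
    # Sum the element counts of all tensors that receive gradients.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

If the helper is this one-liner, the value logged above is the total number of trainable weights in the model.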