Example #1
    def __init__(self, input_size, output_size, n_features, d_model=256, nhead=8, num_layers=3, dropout=0.1):
        super(TransformerDecoderModel, self).__init__()

        self.d_model = d_model
        self.criterion = nn.L1Loss()
        self.warmup_steps = 4000

        self.output_size = output_size
        self.n_features = n_features



        self.encoder = nn.Linear(n_features, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)

        self.decoder = nn.Linear(n_features, d_model)
        self.pos_decoder = PositionalEncoding(d_model, dropout) 

        decoder_layer = nn.TransformerDecoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=d_model * 4, dropout=dropout, activation='relu')
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
        self.fc_out = nn.Linear(d_model, n_features)

        self.src_mask = None
        self.trg_mask = None
        self.memory_mask = None
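
The snippets on this page only show constructors. As a rough, self-contained sketch (the dimensions, dummy tensors, and causal-mask construction below are assumptions, not taken from any of the listed projects), driving an nn.TransformerDecoder built this way might look like:

import torch
import torch.nn as nn

d_model, nhead, num_layers = 256, 8, 3
layer = nn.TransformerDecoderLayer(d_model=d_model, nhead=nhead,
                                   dim_feedforward=d_model * 4, dropout=0.1)
decoder = nn.TransformerDecoder(layer, num_layers=num_layers)

tgt = torch.randn(10, 4, d_model)     # (tgt_len, batch, d_model)
memory = torch.randn(16, 4, d_model)  # (src_len, batch, d_model), e.g. encoder output
# Causal mask: -inf above the diagonal, so position i cannot attend to j > i
tgt_mask = torch.triu(torch.full((10, 10), float('-inf')), diagonal=1)
out = decoder(tgt, memory, tgt_mask=tgt_mask)  # -> (10, 4, d_model)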
Example #2
    def __init__(self,
                 ntokens,
                 nfeat,
                 nhead,
                 nlayer,
                 nff,
                 maxlen=64,
                 dropout=0.1,
                 act_fn='relu',
                 beam_size=-1):
        super().__init__()

        self.token_embed = nn.Embedding(ntokens, nfeat)
        self.src_pos_embed = PosEmbedding(maxlen, nfeat)
        self.tgt_pos_embed = PosEmbedding(maxlen, nfeat)

        self.beam_size = beam_size

        encoder_layer = nn.TransformerEncoderLayer(nfeat, nhead, nff, dropout,
                                                   act_fn)
        self.encoder = nn.TransformerEncoder(encoder_layer, nlayer,
                                             nn.LayerNorm(nfeat))
        decoder_layer = nn.TransformerDecoderLayer(nfeat, nhead, nff, dropout,
                                                   act_fn)
        self.decoder = nn.TransformerDecoder(decoder_layer, nlayer,
                                             nn.LayerNorm(nfeat))

        self.proj = nn.Linear(nfeat, ntokens)
Example #3
    def __init__(
        self,
        num_layers: int = 3,
        d_model: int = 512,
        num_heads: int = 8,
        dff: int = 2048,
        vocab_size: int = 120,
        maximum_position_encoding: int = 50,
        dropout: float = 0.2,
    ) -> None:
        super().__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = nn.Embedding(vocab_size + 3,
                                      d_model)  # 3 more classes EOS/SOS/PAD
        self.register_buffer(
            'pos_encoding',
            positional_encoding(maximum_position_encoding, d_model))

        self.dec_layers = nn.ModuleList([
            nn.TransformerDecoderLayer(
                d_model=d_model,
                nhead=num_heads,
                dim_feedforward=dff,
                dropout=dropout,
                activation='relu',
                batch_first=True,
            ) for _ in range(num_layers)
        ])

        self.dropout = nn.Dropout(dropout)
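
Example #3 stacks decoder layers manually in an nn.ModuleList with batch_first=True instead of wrapping them in nn.TransformerDecoder. A minimal sketch of iterating such a stack (the shapes and the mask here are assumptions for illustration):

import torch
import torch.nn as nn

d_model, num_layers = 512, 3
dec_layers = nn.ModuleList([
    nn.TransformerDecoderLayer(d_model=d_model, nhead=8, dim_feedforward=2048,
                               dropout=0.2, activation='relu', batch_first=True)
    for _ in range(num_layers)
])

x = torch.randn(4, 12, d_model)       # (batch, tgt_len, d_model)
memory = torch.randn(4, 50, d_model)  # (batch, src_len, d_model)
tgt_mask = torch.triu(torch.full((12, 12), float('-inf')), diagonal=1)
for dec_layer in dec_layers:          # what nn.TransformerDecoder does internally,
    x = dec_layer(x, memory, tgt_mask=tgt_mask)  # minus the optional final norm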
Example #4
    def __init__(self,
                 num_emb: int,
                 emb_dim: int,
                 nhead: int = 8,
                 ff_dim: int = 2048,
                 num_enc_layers: int = 6,
                 num_dec_layers: int = 6,
                 activation: str = "relu"):
        super(TransformerS2S, self).__init__()

        self.emb = nn.Embedding(num_emb, emb_dim)
        self.pe = PositionalEncoding(emb_dim)
        l_norm = nn.LayerNorm(emb_dim)
        tel = nn.TransformerEncoderLayer(emb_dim,
                                         nhead,
                                         ff_dim,
                                         activation=activation)
        tdl = nn.TransformerDecoderLayer(emb_dim,
                                         nhead,
                                         ff_dim,
                                         activation=activation)
        self.enc = nn.TransformerEncoder(tel, num_enc_layers, norm=l_norm)
        self.dec = nn.TransformerDecoder(tdl, num_dec_layers, norm=l_norm)
        self.lin = nn.Linear(emb_dim, num_emb)
        self.emb_scale = math.sqrt(emb_dim)
Example #5
    def __init__(self, input_len, output_len, d_model, vocab_size):
        super(CommentEncodedtoSkills, self).__init__()
        #self.arg = arg
        self.output_len = output_len
        self.input_len = input_len
        self.d_model = d_model
        self.vocab_size = vocab_size
        self.pos_encoder = PositionalEncoding(d_model)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=self.d_model,
                                                        nhead=8).to(device)

        self.encoder = nn.TransformerEncoder(encoder_layer=self.encoder_layer,
                                             num_layers=8).to(device)

        self.decoder_layer = nn.TransformerDecoderLayer(d_model=self.d_model,
                                                        nhead=8).to(device)

        self.decoder = nn.TransformerDecoder(decoder_layer=self.decoder_layer,
                                             num_layers=8).to(device)

        self.decoder_emb = nn.Embedding(self.vocab_size, self.d_model)

        self.predictor = nn.Linear(self.d_model, self.vocab_size)
        self.soft = nn.Softmax(dim=0)
        self.probHead = nn.Linear(self.d_model, 1)
Example #6
 def __init__(self):
     super(DynamicQuantModule.M, self).__init__()
     self.rnn = nn.RNN(4, 8, 2)
     self.rnncell = nn.RNNCell(4, 8)
     self.gru = nn.GRU(4, 8, 2)
     self.grucell = nn.GRUCell(4, 8)
     self.lstm = nn.LSTM(4, 8, 2)
     self.lstmcell = nn.LSTMCell(4, 8)
     self.linears = nn.ModuleList([
         nn.Identity(54),
         nn.Linear(20, 20),
         nn.Bilinear(20, 20, 40),
     ])
     self.transformers = nn.ModuleList([
         nn.Transformer(d_model=2,
                        nhead=2,
                        num_encoder_layers=1,
                        num_decoder_layers=1),
         nn.TransformerEncoder(nn.TransformerEncoderLayer(d_model=2,
                                                          nhead=2),
                               num_layers=1),
         nn.TransformerDecoder(nn.TransformerDecoderLayer(d_model=2,
                                                          nhead=2),
                               num_layers=1),
     ])
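
Example #6 appears to come from a dynamic-quantization test. A minimal, self-contained sketch of dynamic quantization over the same kinds of submodules (the toy module below is an assumption, not the original DynamicQuantModule.M):

import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(4, 8, 2)
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        out, _ = self.lstm(x)
        return self.fc(out)

# Replace Linear/LSTM submodules with dynamically quantized versions
quantized = torch.quantization.quantize_dynamic(
    Toy().eval(), {nn.Linear, nn.LSTM}, dtype=torch.qint8)
print(quantized)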
Example #7
 def __init__(self, vocab_size):
     super().__init__()
     decoder_layer = nn.TransformerDecoderLayer(768, 2, 1024, dropout=0.1)
     self.transformer_decoder = nn.TransformerDecoder(decoder_layer, 2)
     self.decoder = nn.Embedding(vocab_size, 768)
     self.pos_decoder = PositionalEncoding(768, 0.5)
     self.fc = nn.Linear(768, vocab_size)
Example #8
 def __init__(self,
              input_dim,
              output_dim,
              nhead=4,
              d_model=128,
              num_layers=6,
              dim_feedforward=256,
              quantile=False):
     super(Transformer, self).__init__()
     encoder_layer = nn.TransformerEncoderLayer(d_model,
                                                nhead,
                                                dim_feedforward,
                                                dropout=0)
     decoder_layer = nn.TransformerDecoderLayer(d_model,
                                                nhead,
                                                dim_feedforward,
                                                dropout=0)
     self.transformer_encoder = nn.TransformerEncoder(encoder_layer,
                                                      num_layers=num_layers)
     self.transformer_decoder = nn.TransformerDecoder(decoder_layer,
                                                      num_layers=num_layers)
     self.embedding = nn.Linear(input_dim, d_model)
     self.output_layer = nn.Linear(d_model, output_dim)
     self.output_dim = output_dim
     self.quantile = quantile
     if self.quantile:
         self.quantile = nn.Linear(1, 3)
Example #9
    def __init__(self, sent_encoder, graph_encoder, num_entity: int,
                 num_relation: int, total_word: int, dim: int, sent_len: int):
        super(GCAKE, self).__init__()

        self.ent_embedding = nn.Embedding(num_entity, dim)
        self.rel_embedding = nn.Embedding(num_relation, dim)
        self.word_embedding = nn.Embedding(total_word, dim)
        # embed_dim must be divisible by num_heads
        self.triples_encoder = TriplesEncoder(d_model=dim,
                                              nhead=4,
                                              dim_feedforward=2048,
                                              num_layers=3)
        self.sent_encoder = sent_encoder
        self.second_triple_encoder = nn.TransformerDecoderLayer(
            d_model=dim, nhead=4,
            dim_feedforward=2048)  # TODO: connect graph encoder to it
        self.graph_encoder = graph_encoder
        self.graph_encoder.set_ent_embeddings(self.ent_embedding)

        self.classifier = nn.Sequential(
            nn.Linear(in_features=3 * dim, out_features=1),
            # nn.ReLU(),
            # nn.Linear(in_features=dim, out_features=1),
            nn.Sigmoid())

        self.layer_norm = nn.LayerNorm(dim)
Example #10
    def __init__(self, vocab: int, max_output_length: int, dim: int = 128):

        super().__init__()

        # Parameters
        self.dim = dim
        self.max_output_length = max_output_length
        nhead = 4
        num_layers = 4
        dim_feedforward = dim

        # Encoder part
        self.embedding = nn.Embedding(vocab, dim)
        self.pos_encoder = PositionalEncoding(d_model=self.dim)
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=self.dim, nhead=nhead,
                dim_feedforward=dim_feedforward),
            num_layers=num_layers)

        # Decoder part
        self.y_mask = generate_triangular_mask(self.max_output_length)
        self.transformer_decoder = nn.TransformerDecoder(
            decoder_layer=nn.TransformerDecoderLayer(
                d_model=self.dim, nhead=nhead,
                dim_feedforward=dim_feedforward),
            num_layers=num_layers)
        self.fc = nn.Linear(self.dim, vocab)

        # It is empirically important to initialize weights properly
        self.init_weights()
Example #11
    def __init__(self, input_dim, target_vocab_size, d_model, nhead,
                 dim_feedforward, num_encoder_layers, num_decoder_layers,
                 decoder_embed_dim, dropout):
        super().__init__()
        self.encoder_conv_context = EncoderConvContext(input_dim=input_dim,
                                                       d_model=d_model)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout)
        encoder_norm = nn.LayerNorm(d_model)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers,
                                             encoder_norm)

        self.decoder_embed = nn.Embedding(target_vocab_size,
                                          decoder_embed_dim,
                                          padding_idx=0)
        self.decoder_conv_context = DecoderConvContext(
            input_dim=decoder_embed_dim,
            output_dim=d_model,
            out_channels=256,
            kernel_size=3,
            num_conv_layers=4)
        decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers,
                                             decoder_norm)

        self.linear = nn.Linear(d_model, target_vocab_size)
Example #12
    def __init__(self, numberTokens, maxLength, numberEncoderLayers,
                 numberDecoderLayers, attentionHeadCount,
                 transformerHiddenDenseSize, linearHiddenSize):
        # Based on https://pytorch.org/tutorials/beginner/transformer_tutorial.html
        super(Transformer, self).__init__()
        self.model_type = 'Transformer'
        embeddingSize = 300
        self.embeddingSize = embeddingSize
        self.numberTokens = numberTokens

        self.encoderEmbedding = nn.Embedding(numberTokens, embeddingSize)
        self.maxLength = maxLength

        encoderLayer = nn.TransformerEncoderLayer(embeddingSize,
                                                  attentionHeadCount,
                                                  transformerHiddenDenseSize)

        self.encoder = nn.TransformerEncoder(encoderLayer, numberEncoderLayers)

        self.decoderEmbedding = nn.Embedding(numberTokens, embeddingSize)

        decoderLayer = nn.TransformerDecoderLayer(embeddingSize,
                                                  attentionHeadCount,
                                                  transformerHiddenDenseSize)

        self.decoder = nn.TransformerDecoder(decoderLayer, numberDecoderLayers)
        self.decoderLinear = nn.Linear(embeddingSize, numberTokens)
Example #13
 def _get_transformer_decoder(self, n_layers: int):
     decoder_layer1 = nn.TransformerDecoderLayer(
         d_model=self.rnn_hidden_size * 2,
         nhead=self.num_attention_heads,
         dropout=self.attention_dropout,
         dim_feedforward=2000)
     return nn.TransformerDecoder(decoder_layer1, n_layers)
Example #14
    def __init__(self,
                 src_vocab,
                 tgt_vocab,
                 dim_embeddings=512,
                 n_heads=8,
                 ff_dim=512,
                 n_layers=3,
                 dropout=0.1):
        super(Seq2SeqTransformer, self).__init__()

        self.emb_dim = dim_embeddings

        self.src_embeddings = TokenEmbedding(src_vocab, dim_embeddings)
        self.tgt_embeddings = TokenEmbedding(tgt_vocab, dim_embeddings)
        self.pe = PositionalEncoding(dim_embeddings, dropout=dropout)

        # Encoder model
        encoder_norm = nn.LayerNorm(dim_embeddings)
        enc_layer = nn.TransformerEncoderLayer(dim_embeddings, n_heads, ff_dim)
        self.encoder = nn.TransformerEncoder(enc_layer,
                                             num_layers=n_layers,
                                             norm=encoder_norm)

        # Decoder model
        dec_layer = nn.TransformerDecoderLayer(dim_embeddings, n_heads, ff_dim)
        decoder_norm = nn.LayerNorm(dim_embeddings)
        self.decoder = nn.TransformerDecoder(dec_layer,
                                             num_layers=n_layers,
                                             norm=decoder_norm)

        # Generator
        self.generator = nn.Linear(dim_embeddings, tgt_vocab)
Example #15
    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_decoder_layers=6,
                 dropout=0.3,
                 attention_dropout=0.1,
                 activation='relu',
                 tgt_dictionary=None):

        super(TransformerDecoder, self).__init__()
        self.dictionary = tgt_dictionary
        self.embedding = Embedding(len(self.dictionary),
                                   d_model,
                                   padding_idx=self.dictionary.pad_index)
        self.num_layers = num_decoder_layers
        self.layer = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model=d_model,
                                       nhead=nhead,
                                       dim_feedforward=d_model * 4,
                                       dropout=attention_dropout,
                                       activation=activation),
            num_layers=self.num_layers,
        )
        self.output_layer = Linear(d_model, len(self.dictionary), bias=True)
        self.d_model = d_model
        self.position_embedding = Embedding(MAX_POSITIONS, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)
        self.dropout = dropout
        self.loss_fn = LabelSmoothingLoss(len(tgt_dictionary), 0.1)
Example #16
    def __init__(self, d_model, seq_len, nhead, dim_feedforward, dropout,
                 num_layers):
        super(Generator, self).__init__()

        self.inp = torch.nn.Parameter(torch.randn(seq_len, 51))
        self.inp.requires_grad = True

        # Processing for the memory input (used when real data is not fed in)
        self.z_layer = nn.Linear(d_model, d_model * seq_len)
        self.z_norm = nn.LayerNorm(d_model)  # match the Transformer encoder's processing

        # Input processing for the time-series data
        self.embedding_layer = nn.Linear(51, d_model)

        # Positional encoding
        self.positionalencoding_layer = PositionalEncoding(d_model, seq_len)

        # Dropout
        self.dropout_layer = nn.Dropout(p=dropout)

        # Transformer decoder stack (default dim_feedforward is 2048)
        self.decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(self.decoder_layer,
                                                         num_layers=num_layers)

        # Output projection for the time-series data
        self.output_layer = nn.Linear(d_model, 51)
Example #17
def _build_transformer_decoder(
    d_model: int,
    nhead: int,
    num_decoder_layers: int,
    dim_feedforward: int,
    dropout: float,
) -> nn.TransformerDecoder:
    """build transformer decoder with params
    Parameters
    ----------
    d_model : int
    nhead : int
    num_decoder_layers : int
    dim_feedforward : int
    dropout : float
    Returns
    -------
    nn.TransformerDecoder
    """
    decoder_layer = nn.TransformerDecoderLayer(
        d_model=d_model,
        nhead=nhead,
        dim_feedforward=dim_feedforward,
        dropout=dropout,
    )
    decoder_norm = nn.LayerNorm(d_model)

    decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers,
                                    decoder_norm)
    return decoder
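
A hypothetical call to the builder above, also passing padding masks, which the other examples do not show (the shapes and the all-False padding masks are assumptions; this relies on the torch and torch.nn imports the snippet already assumes):

import torch

decoder = _build_transformer_decoder(d_model=256, nhead=8, num_decoder_layers=4,
                                     dim_feedforward=1024, dropout=0.1)

tgt = torch.randn(7, 2, 256)      # (tgt_len, batch, d_model)
memory = torch.randn(11, 2, 256)  # (src_len, batch, d_model)
tgt_mask = torch.triu(torch.full((7, 7), float('-inf')), diagonal=1)
# Boolean padding masks: True marks positions attention should ignore
tgt_key_padding_mask = torch.zeros(2, 7, dtype=torch.bool)
memory_key_padding_mask = torch.zeros(2, 11, dtype=torch.bool)
out = decoder(tgt, memory, tgt_mask=tgt_mask,
              tgt_key_padding_mask=tgt_key_padding_mask,
              memory_key_padding_mask=memory_key_padding_mask)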
Example #18
    def __init__(self, hparams):
        super(Vae, self).__init__()
        self.hparams = asdict(hparams)
        self.save_hyperparameters()

        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(self.hparams.hidden_size, nhead=8),
            num_layers=self.hparams.number_of_encoder_layer)
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(self.hparams.hidden_size, nhead=8),
            num_layers=self.hparams.number_of_decoder_layer)

        self.hidden2mean = nn.Linear(self.hparams.hidden_size,
                                     self.hparams.latent_dim)
        self.hidden2logv = nn.Linear(self.hparams.hidden_size,
                                     self.hparams.latent_dim)
        self.latent2hidden = nn.Linear(self.hparams.latent_dim,
                                       self.hparams.hidden_size)

        if hparams.pretrained_embedding_path is not None:
            self.word_embedding = nn.Embedding.from_pretrained(
                load_pretrained_embedding_tensor(
                    hparams.pretrained_embedding_path))
        else:
            self.word_embedding = nn.Embedding(self.hparams.vocab_size,
                                               self.hparams.embedding_dim)
        self.pos_embedding = PositionalEncoding(
            self.hparams.hidden_size, max_len=self.hparams.max_sent_len)

        self.hidden2vocab = nn.Linear(self.hparams.hidden_size,
                                      self.hparams.vocab_size)
        if self.hparams.tie_weights:
            self.hidden2vocab.weight = self.word_embedding.weight

        self._init_weights()
Example #19
    def __init__(
        self,
        data_config: Dict[str, Any],
        args: argparse.Namespace = None,
    ) -> None:
        super().__init__()
        self.data_config = data_config
        self.input_dims = data_config["input_dims"]
        self.num_classes = len(data_config["mapping"])
        inverse_mapping = {
            val: ind
            for ind, val in enumerate(data_config["mapping"])
        }
        self.start_token = inverse_mapping["<S>"]
        self.end_token = inverse_mapping["<E>"]
        self.padding_token = inverse_mapping["<P>"]
        self.max_output_length = data_config["output_dims"][0]
        self.args = vars(args) if args is not None else {}

        self.dim = self.args.get("tf_dim", TF_DIM)
        tf_fc_dim = self.args.get("tf_fc_dim", TF_FC_DIM)
        tf_nhead = self.args.get("tf_nhead", TF_NHEAD)
        tf_dropout = self.args.get("tf_dropout", TF_DROPOUT)
        tf_layers = self.args.get("tf_layers", TF_LAYERS)

        # ## Encoder part - should output a vector sequence of length self.dim per sample
        resnet = torchvision.models.resnet18(pretrained=False)
        self.resnet = torch.nn.Sequential(*(list(
            resnet.children())[:-2]))  # Exclude AvgPool and Linear layers
        # Resnet will output (B, RESNET_DIM, _H, _W) logits where _H = input_H // 32, _W = input_W // 32

        # self.encoder_projection = nn.Conv2d(RESNET_DIM, self.dim, kernel_size=(2, 1), stride=(2, 1), padding=0)
        self.encoder_projection = nn.Conv2d(RESNET_DIM,
                                            self.dim,
                                            kernel_size=1)
        # encoder_projection will output (B, dim, _H, _W) logits

        self.enc_pos_encoder = PositionalEncodingImage(
            d_model=self.dim,
            max_h=self.input_dims[1],
            max_w=self.input_dims[2])  # Max (Ho, Wo)

        # ## Decoder part
        self.embedding = nn.Embedding(self.num_classes, self.dim)
        self.fc = nn.Linear(self.dim, self.num_classes)

        self.dec_pos_encoder = PositionalEncoding(
            d_model=self.dim, max_len=self.max_output_length)

        self.y_mask = generate_square_subsequent_mask(self.max_output_length)

        self.transformer_decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(d_model=self.dim,
                                       nhead=tf_nhead,
                                       dim_feedforward=tf_fc_dim,
                                       dropout=tf_dropout),
            num_layers=tf_layers,
        )

        self.init_weights()  # This is empirically important
Example #20
def build_model(model_class, config, tokenizer):
    encoder = model_class(config=config)
    decoder_layer = nn.TransformerDecoderLayer(
        d_model=config.hidden_size, nhead=config.num_attention_heads)
    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
    model = Seq2Seq(
        encoder=encoder,
        decoder=decoder,
        config=config,
        beam_size=args.beam_size,
        max_length=args.max_target_length,
        sos_id=tokenizer.cls_token_id,
        eos_id=tokenizer.sep_token_id,
    )

    assert os.path.exists("pytorch_model.bin"), "Weight is not downloaded."

    model.load_state_dict(
        torch.load(
            "pytorch_model.bin",
            map_location=torch.device("cpu"),
        ),
        strict=False,
    )
    return model
Example #21
 def __init__(self,
              d_model: int = 512,
              nhead: int = 8,
              num_encoder_layers: int = 6,
              num_decoder_layers: int = 6,
              dim_feedforward: int = 2048,
              dropout: float = 0.1,
              activation: str = "relu",
              source_vocab_length: int = 60000,
              target_vocab_length: int = 60000) -> None:
     super(Transformer, self).__init__()
     self.source_embedding = nn.Embedding(source_vocab_length, d_model)
     self.pos_encoder = PositionalEncoding(d_model)
     encoder_layer = nn.TransformerEncoderLayer(d_model, nhead,
                                                dim_feedforward, dropout,
                                                activation)
     encoder_norm = nn.LayerNorm(d_model)
     self.encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers,
                                          encoder_norm)
     self.target_embedding = nn.Embedding(target_vocab_length, d_model)
     decoder_layer = nn.TransformerDecoderLayer(d_model, nhead,
                                                dim_feedforward, dropout,
                                                activation)
     decoder_norm = nn.LayerNorm(d_model)
     self.decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers,
                                          decoder_norm)
     #self.out = nn.Linear(512, target_vocab_length)
     self.out = nn.Linear(d_model, target_vocab_length)
     self._reset_parameters()
     self.d_model = d_model
     self.nhead = nhead
Example #22
    def __init__(self, args, embs_ann, vocab_out, pad, seg):
        '''
        speaker model
        '''
        super().__init__(args, embs_ann, vocab_out, pad, seg)

        # encoder and visual embeddings
        self.encoder_vl, self.encoder_lang = None, None
        if any('frames' in ann_type for ann_type in args.data['ann_type']):
            # create a multi-modal encoder
            self.encoder_vl = EncoderVL(args)
            # create feature embeddings
            self.vis_feat = FeatureFlat(input_shape=self.visual_tensor_shape,
                                        output_size=args.demb)
        else:
            # create an encoder for language only
            self.encoder_lang = EncoderLang(args.encoder_layers, args,
                                            embs_ann)

        # decoder parts
        decoder_layer = nn.TransformerDecoderLayer(
            args.demb, args.decoder_lang['heads'], args.decoder_lang['demb'],
            args.decoder_lang['dropout'])
        self.decoder = nn.TransformerDecoder(decoder_layer,
                                             args.decoder_lang['layers'])
        self.enc_pos = PosLangEncoding(
            args.demb) if args.decoder_lang['pos_enc'] else None
        self.emb_subgoal = nn.Embedding(len(vocab_out), args.demb)

        # final touch
        self.init_weights()
Example #23
    def __init__(self, lookahead=10):

        super(TransformerModel, self).__init__()

        input_dims = 55
        enc_input_dims = 1024
        self.enc_input_dims = enc_input_dims
        num_heads = 8
        enc_ff_dims = 2048
        num_enc_layers = 12
        dropout = 0.1
        self.lookahead = lookahead

        self.encoder = nn.Linear(input_dims, enc_input_dims)
        encoder_layers = nn.TransformerEncoderLayer(enc_input_dims, num_heads,
                                                    enc_ff_dims, dropout)
        self.tranformer_encoder = nn.TransformerEncoder(
            encoder_layers, num_enc_layers)

        decoder_layers = nn.TransformerDecoderLayer(enc_input_dims, num_heads,
                                                    enc_ff_dims, dropout)
        self.transformer_decoder = nn.TransformerDecoder(
            decoder_layers, num_enc_layers)

        self.fc1 = nn.Linear(enc_input_dims, 3)
Example #24
    def __init__(self, src_vocab_size=128, tgt_vocab_size=128,
                 embedding_dim=128, fcn_hidden_dim=128,
                 num_heads=4, num_layers=2, dropout=0.2):
        super(Transformer, self).__init__()

        self.embedding_dim = embedding_dim
        # Source and Encoder layers
        self.src_embed = Embedding(src_vocab_size, embedding_dim, padding_idx=PAD_ID)
        self.src_pos_encoder = PositionalEncoding(embedding_dim)
        encoder_layer = TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                                   dim_feedforward=fcn_hidden_dim, dropout=dropout)
        encoder_norm = nn.LayerNorm(embedding_dim)
        self.encoder = TransformerEncoder(encoder_layer, num_layers, encoder_norm)

        # Target and Decoder layers
        self.tgt_embed = Embedding(tgt_vocab_size, embedding_dim, padding_idx=PAD_ID)
        self.tgt_pos_encoder = PositionalEncoding(embedding_dim)
        decoder_layer = nn.TransformerDecoderLayer(d_model=embedding_dim, nhead=num_heads,
                                                   dim_feedforward=fcn_hidden_dim, dropout=dropout)
        decoder_norm = nn.LayerNorm(embedding_dim)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers, decoder_norm)
        # Final linear layer
        self.final_out = nn.Linear(embedding_dim, tgt_vocab_size)

        # Initialize masks
        self.src_mask = None
        self.tgt_mask = None
        self.mem_mask = None
        # Initialize weights of model
        self._reset_parameters()
Example #25
    def __init__(self, config: Config):
        super().__init__(config)
        self.teacher_forcing_epoch_num = config.experiment.teacher_forcing_epoch_num
        self.gradual_teacher_forcing = config.experiment.gradual_teacher_forcing

        n_heads = config.experiment.transformer_attention_heads
        ff_dim = config.experiment.transformer_ff_dim
        transformer_layers_num = config.experiment.transformer_attention_layers

        encoder_layer = nn.TransformerEncoderLayer(self.embed_dim,
                                                   n_heads,
                                                   ff_dim,
                                                   self.dropout,
                                                   batch_first=True)
        encoder_norm = nn.LayerNorm(self.embed_dim)
        self.encoder = nn.TransformerEncoder(encoder_layer,
                                             transformer_layers_num,
                                             encoder_norm)

        decoder_layer = nn.TransformerDecoderLayer(self.embed_dim,
                                                   n_heads,
                                                   ff_dim,
                                                   self.dropout,
                                                   batch_first=True)
        decoder_norm = nn.LayerNorm(self.embed_dim)
        self.decoder = nn.TransformerDecoder(decoder_layer,
                                             transformer_layers_num,
                                             decoder_norm)
Example #26
    def __init__(self, d_model=512, n_head=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        """
        Parameters:
            d_model: model dimensions
            n_head: number of heads in multi-head self-attention
            num_encoder_layers: number of layers in encoder
            num_decoder_layers: number of layers in decoder
            dim_feedforward: FCN layer dimensions
            activation: activation function
        """
        super().__init__()
        self.d_model = d_model
        encoder_layer = nn.TransformerEncoderLayer(d_model, n_head, dim_feedforward,
                                                    dropout, activation)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
        decoder_layer = nn.TransformerDecoderLayer(d_model, n_head, dim_feedforward,
                                                    dropout, activation)
        decoder_norm = nn.LayerNorm(d_model)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)

        
        self.__reset__parameters()
Example #27
    def __init__(self,
                 n_skill,
                 max_seq=100,
                 embed_dim=128,
                 num_heads=8,
                 dropout=0.2):
        super(SAKTModel, self).__init__()
        self.n_skill = n_skill
        self.embed_dim = embed_dim

        self.embedding = nn.Embedding(3, embed_dim)
        self.pos_embedding_enc = nn.Embedding(max_seq - 1, embed_dim)
        self.pos_embedding_dec = nn.Embedding(max_seq - 1, embed_dim)
        self.e_embedding = nn.Embedding(n_skill + 1, embed_dim)
        self.part_embedding = nn.Embedding(8, embed_dim)
        self.elapsed_time_embedding = nn.Embedding(302, embed_dim)
        self.duration_previous_content_embedding = nn.Embedding(302, embed_dim)
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim,
                                                   nhead=num_heads)
        self.transformer_enc = nn.TransformerEncoder(
            encoder_layer=encoder_layer, num_layers=2)
        decoder_layer = nn.TransformerDecoderLayer(d_model=embed_dim,
                                                   nhead=num_heads)
        self.transformer_dec = nn.TransformerDecoder(
            decoder_layer=decoder_layer, num_layers=2)

        self.dropout = nn.Dropout(0.2)
        self.layer_normal = nn.LayerNorm(embed_dim)

        self.ffn = FFN(embed_dim)
        self.pred = nn.Linear(embed_dim, 1)
Example #28
    def __init__(self, feature_dim, vocab_size, n_head, n_layers, dropout):
        """
    :param n_head: the number of heads in Transformer
    :param n_layers: the number of layers of Transformer
    """
        super(DecoderTransformer, self).__init__()

        self.feature_dim = feature_dim
        self.embed_dim = feature_dim
        self.vocab_size = vocab_size
        self.dropout = dropout

        # embedding layer
        self.vocab_embedding = nn.Embedding(
            vocab_size, self.embed_dim)  #vocaburaly embedding

        # Transformer layer
        decoder_layer = nn.TransformerDecoderLayer(
            feature_dim,
            n_head,
            dim_feedforward=feature_dim * 4,
            dropout=self.dropout)
        self.transformer = nn.TransformerDecoder(decoder_layer, n_layers)
        self.position_encoding = PositionalEncoding(feature_dim)

        # Linear layer to find scores over vocabulary
        self.wdc = nn.Linear(feature_dim, vocab_size)
        self.dropout = nn.Dropout(p=self.dropout)
        self.init_weights(
        )  # initialize some layers with the uniform distribution
Example #29
    def __init__(
        self,
        d_model: int,
        nhead: int,
        vocab_size: int,
        max_len: int,
        num_encoder_layers: int = 6,
        num_decoder_layers: int = 6,
    ) -> None:
        super(Transformer3, self).__init__(d_model, nhead, vocab_size, max_len)

        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model,
                                                   nhead=nhead)
        decoder_layer = nn.TransformerDecoderLayer(d_model=d_model,
                                                   nhead=nhead)

        self.encoder = TransformerEncoderCustom(encoder_layer,
                                                num_encoder_layers)
        self.decoder = TransformerDecoderCustom(decoder_layer,
                                                num_decoder_layers)

        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, max_len)

        self.output_bias = Parameter(torch.Tensor(vocab_size))
        self._init_bias()
Example #30
 def __init__(self, text, args, device):
     super(NMT, self).__init__()
     self.text = text
     self.args = args
     self.device = device
     self.Embeddings = Embeddings(args['embed_size'], self.text)
     self.encoder_layer = nn.TransformerEncoderLayer(
         d_model=args['d_model'],
         nhead=args['nhead'],
         dim_feedforward=args['dim_feedforward'],
         dropout=args['dropout'])
     self.encoder_norm = nn.LayerNorm(args['d_model'])
     self.encoder = nn.TransformerEncoder(
         encoder_layer=self.encoder_layer,
         num_layers=args['num_encoder_layers'],
         norm=self.encoder_norm)
     self.decoder_layer = nn.TransformerDecoderLayer(
         d_model=args['d_model'],
         nhead=args['nhead'],
         dim_feedforward=args['dim_feedforward'],
         dropout=args['dropout'])
     self.decoder_norm = nn.LayerNorm(args['d_model'])
     self.decoder = nn.TransformerDecoder(
         decoder_layer=self.decoder_layer,
         num_layers=args['num_decoder_layers'],
         norm=self.decoder_norm)
     self.project = nn.Linear(args['d_model'],
                              len(self.text.tar),
                              bias=False)
     self.project.weight = self.Embeddings.tar.weight
     self.dropout = nn.Dropout(args['dropout'])
     self.project_value = math.pow(args['d_model'], 0.5)
     self.eps = args['smoothing_eps']
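
All of the examples above stop at construction. A minimal sketch of greedy autoregressive decoding with this kind of decoder (the vocabulary size, special-token ids, and dummy encoder memory below are assumptions for illustration):

import torch
import torch.nn as nn

vocab, d_model, sos_id, eos_id, max_len = 100, 128, 1, 2, 20
embed = nn.Embedding(vocab, d_model)
decoder = nn.TransformerDecoder(
    nn.TransformerDecoderLayer(d_model=d_model, nhead=8), num_layers=2)
proj = nn.Linear(d_model, vocab)

memory = torch.randn(15, 1, d_model)  # encoder output, (src_len, batch=1, d_model)
ys = torch.tensor([[sos_id]])         # generated ids so far, (tgt_len=1, batch=1)
for _ in range(max_len):
    t = ys.size(0)
    tgt_mask = torch.triu(torch.full((t, t), float('-inf')), diagonal=1)
    out = decoder(embed(ys), memory, tgt_mask=tgt_mask)
    next_tok = proj(out[-1]).argmax(dim=-1)        # greedy pick at the last position
    ys = torch.cat([ys, next_tok.unsqueeze(0)], dim=0)
    if next_tok.item() == eos_id:
        break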