Example #1
    def __init__(self, dim_sound, num_tokens=30522, dim_model=768, nhead=12, name="music_bert",
                 num_encoder_layers=12, d_feed=3072, dropout=0.1, n_convs=4):
        super(MusicBert, self).__init__()

        self.d_model = dim_model
        self.num_tokens = num_tokens

        self.PATH = "models/" + name + ".pth"

        # Token Embedder
        self.embedder = nn.Embedding(num_tokens, dim_model)

        self.sound_compressor = SoundCNNEncoder([dim_sound] + [dim_model] *
                                                (n_convs - 1))
        self.sound_decompressor = SoundCNNDecoder([dim_model] * n_convs)

        self.position_embeddings = PositionalEncodings(dim_model, dropout)

        encoder_layer = TransformerEncoderLayer(dim_model, nhead,
                                                dim_feedforward=d_feed,
                                                dropout=dropout,
                                                activation='gelu')

        device = self.get_device()

        # Registers IDs in the buffers memory
        self.register_buffer("CLS_ID", torch.tensor([[101]]).to(device).long())
        self.register_buffer("SEP_ID", torch.tensor([[102]]).to(device).long())

        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)
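
Note: every example in this listing assumes a positional-encoding helper defined elsewhere in its project. For reference, here is a minimal self-contained sketch of the sinusoidal module from the standard PyTorch transformer tutorial; the class name and signature are assumptions, not taken from any of these projects.

import math
import torch
import torch.nn as nn

class PositionalEncoding(nn.Module):
    # Adds fixed sinusoidal position information to a (seq_len, batch, d_model) input.
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, 1, d_model)
        pe[:, 0, 0::2] = torch.sin(position * div_term)
        pe[:, 0, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (seq_len, batch, d_model); add the matching slice of encodings.
        x = x + self.pe[:x.size(0)]
        return self.dropout(x)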
Example #2
    def __init__(self, config):
        super(Nucleus, self).__init__()
        self.config = config

        # Embedding Layer.
        self.embedding = nn.Embedding( bittensor.__vocab_size__,  bittensor.__network_dim__ )

        # Local Model
        local_layers = TransformerEncoderLayer( bittensor.__network_dim__, self.config.nucleus.nhead, self.config.nucleus.nhid, self.config.nucleus.dropout, batch_first=True)
        local_hidden_layers = TransformerEncoderLayer( bittensor.__network_dim__, self.config.nucleus.nhead, self.config.nucleus.nhid, self.config.nucleus.dropout, batch_first=True )
        self.local_pos_encoder = PositionalEncoding(bittensor.__network_dim__, self.config.nucleus.dropout)
        self.local_encoder = TransformerEncoder( local_layers, self.config.nucleus.nlayers )
        self.local_hidden = TransformerEncoder( local_hidden_layers, self.config.nucleus.nlayers )
        self.local_decoder = nn.Linear( bittensor.__network_dim__, bittensor.__vocab_size__ , bias=False)

        # Remote Model
        remote_context_layers = TransformerEncoderLayer( bittensor.__network_dim__, self.config.nucleus.nhead, self.config.nucleus.nhid, self.config.nucleus.dropout, batch_first=True )
        self.remote_hidden = TransformerEncoder( remote_context_layers, self.config.nucleus.nlayers )
        self.remote_decoder = nn.Linear( bittensor.__network_dim__, bittensor.__vocab_size__ , bias=False)

        self.loss_fct = nn.CrossEntropyLoss()
        self.noise_multiplier = self.config.nucleus.noise_multiplier
        self.peer_weights = nn.Parameter(torch.ones( [0] , requires_grad=True))
        self.init_weights()
        self.metagraph = None
        self.dendrite = None
Example #3
    def __init__(self, config, metagraph, dendrite, device):
        super(Validator, self).__init__()

        self.embedding = torch.nn.Embedding(bittensor.__vocab_size__,
                                            bittensor.__network_dim__)
        self.layers = TransformerEncoderLayer(bittensor.__network_dim__,
                                              config.nucleus.nhead,
                                              config.nucleus.nhid,
                                              config.nucleus.dropout,
                                              batch_first=True)
        self.encoder = TransformerEncoder(self.layers, config.nucleus.nlayers)
        self.c_layers = TransformerEncoderLayer(bittensor.__network_dim__,
                                                config.nucleus.nhead,
                                                config.nucleus.nhid,
                                                config.nucleus.dropout,
                                                batch_first=True)
        self.local_encoder = TransformerEncoder(self.c_layers, 1)
        self.decoder = torch.nn.Linear(bittensor.__network_dim__,
                                       bittensor.__vocab_size__,
                                       bias=False)
        self.loss_fct = torch.nn.CrossEntropyLoss()
        self.peer_weights = torch.ones([metagraph().n.item()],
                                       requires_grad=False,
                                       device=device)
        self.metagraph = metagraph
        self.dendrite = dendrite
        self.config = config
        self.device = device
        self.gates = {}
        self.sync_with_chain_state()
Example #4
    def __init__(self,
                 nlayers=3,
                 hdim_rgb=1024,
                 hdim_audio=128,
                 nhead=8,
                 dropout=0.5):
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        self.model_type = 'Transformer'
        self.name = 'Transformer_gate_' + str(nlayers)
        self.dropout = dropout
        self.src_mask = None
        self.pos_encoder_rgb = PositionalEncoding(hdim_rgb, dropout)
        self.pos_encoder_audio = PositionalEncoding(hdim_audio, dropout)

        encoder_layers_rgb = TransformerEncoderLayer(hdim_rgb, nhead, hdim_rgb,
                                                     dropout)
        encoder_layers_audio = TransformerEncoderLayer(hdim_audio, nhead,
                                                       hdim_audio, dropout)

        self.transformer_rgb = TransformerEncoder(encoder_layers_rgb, nlayers)
        self.transformer_audio = TransformerEncoder(encoder_layers_audio,
                                                    nlayers)

        self.fc = nn.Linear((hdim_audio + hdim_rgb) * 5, NUM_CLASSES)

        self.gate_fc = nn.Linear((hdim_audio + hdim_rgb) * 5,
                                 (hdim_audio + hdim_rgb) * 5)

        self.init_weights()
Example #5
    def __init__(self, nuser, nloc, ntime, nquadkey, user_dim, loc_dim, time_dim, reg_dim, nhid, nhead_enc, nhead_dec, nlayers, dropout=0.5, **extra_config):
        super(QuadKeyLocPredictor, self).__init__()
        self.emb_user = embedding(nuser, user_dim, zeros_pad=True, scale=True)
        self.emb_loc = embedding(nloc, loc_dim, zeros_pad=True, scale=True)
        self.emb_reg = embedding(nquadkey, reg_dim, zeros_pad=True, scale=True)
        self.emb_time = embedding(ntime, time_dim, zeros_pad=True, scale=True)
        ninp = user_dim
        pos_encoding = extra_config.get("position_encoding", "transformer")
        if pos_encoding == "embedding":
            self.pos_encoder = PositionalEmbedding(loc_dim + reg_dim, dropout)
        elif pos_encoding == "transformer":
            self.pos_encoder = PositionalEncoding(loc_dim + reg_dim, dropout)
        self.enc_layer = TransformerEncoderLayer(loc_dim + reg_dim, nhead_enc, loc_dim + reg_dim, dropout)
        self.encoder = TransformerEncoder(self.enc_layer, nlayers)
        self.region_pos_encoder = PositionalEmbedding(reg_dim, dropout, max_len=20)
        self.region_enc_layer = TransformerEncoderLayer(reg_dim, 1, reg_dim, dropout=dropout)
        self.region_encoder = TransformerEncoder(self.region_enc_layer, 2)
        if not extra_config.get("use_location_only", False):
            if extra_config.get("embedding_fusion", "multiply") == "concat":
                if extra_config.get("user_embedding", False):
                    self.lin = nn.Linear(user_dim + loc_dim + reg_dim + time_dim, ninp)
                else:
                    self.lin = nn.Linear(loc_dim + reg_dim, ninp)

        ident_mat = torch.eye(ninp)
        self.register_buffer('ident_mat', ident_mat)

        self.layer_norm = nn.LayerNorm(ninp)
        self.extra_config = extra_config
        self.dropout = dropout
Example #6
    def __init__(self, config, data_feature):
        super().__init__(config, data_feature)
        self.device = config['device']
        # depend on dataset
        self.num_neg = config['executor_config']['train']['num_negative_samples']
        self.temperature = config['executor_config']['train']['temperature']

        # from dataset
        # from train_dataset!!
        nuser = data_feature['nuser']
        nloc = data_feature['nloc']
        ntime = data_feature['ntime']
        nquadkey = data_feature['nquadkey']

        # from config
        user_dim = int(config['model_config']['user_embedding_dim'])
        loc_dim = int(config['model_config']['location_embedding_dim'])
        time_dim = int(config['model_config']['time_embedding_dim'])
        reg_dim = int(config['model_config']['region_embedding_dim'])
        # nhid = int(config['model_config']['hidden_dim_encoder'])
        nhead_enc = int(config['model_config']['num_heads_encoder'])
        # nhead_dec = int(config['model_config']['num_heads_decoder'])
        nlayers = int(config['model_config']['num_layers_encoder'])
        dropout = float(config['model_config']['dropout'])
        extra_config = config['model_config']['extra_config']
        # print(f"nloc: {nloc} \t loc_dim: {loc_dim}")
        # essential
        self.emb_loc = Embedding(nloc, loc_dim, zeros_pad=True, scale=True)
        self.emb_reg = Embedding(nquadkey, reg_dim, zeros_pad=True, scale=True)
        # optional
        self.emb_user = Embedding(nuser, user_dim, zeros_pad=True, scale=True)
        self.emb_time = Embedding(ntime, time_dim, zeros_pad=True, scale=True)
        ninp = user_dim

        pos_encoding = extra_config.get("position_encoding", "transformer")
        if pos_encoding == "embedding":
            self.pos_encoder = PositionalEmbedding(loc_dim + reg_dim, dropout)
        elif pos_encoding == "transformer":
            self.pos_encoder = PositionalEncoding(loc_dim + reg_dim, dropout)
        self.enc_layer = TransformerEncoderLayer(loc_dim + reg_dim, nhead_enc, loc_dim + reg_dim, dropout)
        self.encoder = TransformerEncoder(self.enc_layer, nlayers)

        self.region_pos_encoder = PositionalEmbedding(reg_dim, dropout, max_len=20)
        self.region_enc_layer = TransformerEncoderLayer(reg_dim, 1, reg_dim, dropout=dropout)
        self.region_encoder = TransformerEncoder(self.region_enc_layer, 2)

        if not extra_config.get("use_location_only", False):
            if extra_config.get("embedding_fusion", "multiply") == "concat":
                if extra_config.get("user_embedding", False):
                    self.lin = nn.Linear(user_dim + loc_dim + reg_dim + time_dim, ninp)
                else:
                    self.lin = nn.Linear(loc_dim + reg_dim, ninp)

        ident_mat = torch.eye(ninp)
        self.register_buffer('ident_mat', ident_mat)
        self.layer_norm = nn.LayerNorm(ninp)

        self.extra_config = extra_config
        self.dropout = dropout
Example #7
 def __init__(self, ntoken, ninp, nhead, nhid, nlayers, params, dropout=0.5, embedding=None):
     super().__init__()
     from torch.nn import TransformerEncoder, TransformerEncoderLayer
     self.model_type = 'TransformerEncoder'
     self.src_mask = None
     self.pos_encoder = PositionalEncoding(ninp, dropout)
     encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
     self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
     self.embedding = nn.Embedding(ntoken, ninp) if embedding is None else embedding
     self.ninp = ninp
     self.params = params
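
Example #7 stores self.src_mask but ends before the forward pass. A hedged sketch of the usual companion methods, following the PyTorch tutorial pattern this class appears to be based on (these bodies are assumptions, not from the project):

 def _generate_square_subsequent_mask(self, sz):
     # Upper-triangular -inf mask: position i may not attend to any j > i.
     return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)

 def forward(self, src):
     if self.src_mask is None or self.src_mask.size(0) != len(src):
         self.src_mask = self._generate_square_subsequent_mask(len(src)).to(src.device)
     src = self.embedding(src) * math.sqrt(self.ninp)  # scale embeddings as in the tutorial
     src = self.pos_encoder(src)
     return self.transformer_encoder(src, self.src_mask)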
Example #8
    def __init__(self,
                 ntoken_in,
                 ntoken_out,
                 embed_size,
                 drop_it,
                 trns_model='base'):
        super(Transformer_Model, self).__init__()

        self.src_mask = None
        self.trg_mask = None
        self.drop = drop_it
        self.PAD_token = 2
        self.src_pad_mask = None
        self.trg_pad_mask = None

        self.embed_in = nn.Embedding(ntoken_in, embed_size)
        self.embed_out = nn.Embedding(ntoken_out, embed_size)
        self.pos_encoder = PositionalEncoding(embed_size,
                                              self.drop,
                                              max_len=5000)

        if trns_model == 'base':
            # base model
            encoder_layer = TransformerEncoderLayer(512, 8, 2048, 0.1)
            encoder_norm = LayerNorm(512)
            self.encoder = TransformerEncoder(encoder_layer, 6, encoder_norm)

            decoder_layer = TransformerDecoderLayer(512, 8, 2048, 0.1)
            decoder_norm = LayerNorm(512)
            self.decoder = TransformerDecoder(decoder_layer, 6, decoder_norm)

        else:
            # big model
            encoder_layer = TransformerEncoderLayer(1024, 16, 4096, 0.3)
            encoder_norm = LayerNorm(1024)
            self.encoder = TransformerEncoder(encoder_layer, 6, encoder_norm)

            decoder_layer = TransformerDecoderLayer(1024, 16, 4096, 0.3)
            decoder_norm = LayerNorm(1024)
            self.decoder = TransformerDecoder(decoder_layer, 6, decoder_norm)

        self.ninp = embed_size
        self.linear_dec = nn.Linear(embed_size, ntoken_out)

        # initialise embedding & linear layer parameters
        self.init_weights()
        # initialise transformer parameters
        self.reset_params()
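
Example #8 initialises src_pad_mask, trg_pad_mask, and a PAD_token of 2, but the snippet never shows how the masks are built. A minimal sketch of the padding-mask step such a model typically performs before calling the encoder/decoder (assumed, not from the project); the boolean mask marks PAD positions as True so attention ignores them:

    def make_pad_mask(self, seq):
        # seq: (seq_len, batch) of token ids -> (batch, seq_len) bool mask,
        # True at padding positions, suitable as *_key_padding_mask.
        return (seq == self.PAD_token).transpose(0, 1)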
Example #9
    def __init__(self,
                 in_dim,
                 num_heads,
                 hidden_dim,
                 num_layers,
                 device,
                 dropout=0.5,
                 use_pos_encoder=False):
        super(TransformerModel, self).__init__()

        self.in_dim = in_dim
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.device = device

        self.batch_size = 128

        if dropout is None:
            dropout = 0

        if use_pos_encoder:
            self.pos_encoder = PositionalEncoding(in_dim, dropout)
        else:
            self.pos_encoder = lambda x: x
        encoder_layers = TransformerEncoderLayer(in_dim, num_heads, hidden_dim,
                                                 dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers,
                                                      num_layers)
Example #10
 def __init__(self, num_layers, em_dim, num_heads, ff_dim, input_vocab,
              target_vocab, max_pos_enc, max_pos_dec):
     super().__init__()
     self.em_dim = em_dim
     # encoder layers
     self.encode_embedding = nn.Embedding(input_vocab, em_dim)
     self.encode_pos_embedding = PositionalEncoding(max_pos=max_pos_enc,
                                                    em_dim=em_dim)
     encoder_layer = TransformerEncoderLayer(em_dim,
                                             nhead=num_heads,
                                             dim_feedforward=ff_dim)
     self.transformer_encoder = TransformerEncoder(
         encoder_layer=encoder_layer, num_layers=num_layers)
     self.encode_dropout = nn.Dropout(p=0.1)
     # decoder layers
     self.decode_embedding = nn.Embedding(target_vocab, em_dim)
     self.decode_pos_embedding = PositionalEncoding(max_pos=max_pos_dec,
                                                    em_dim=em_dim)
     decoder_layer = TransformerDecoderLayer(em_dim,
                                             nhead=num_heads,
                                             dim_feedforward=ff_dim)
     self.transformer_decoder = TransformerDecoder(
         decoder_layer=decoder_layer, num_layers=num_layers)
     self.decode_dropout = nn.Dropout(p=0.1)
     self.final_linear = nn.Linear(em_dim, target_vocab)
Example #11
    def __init__(self,
                 input_size,
                 num_sources,
                 dmodel,
                 num_heads,
                 num_layers,
                 hidden_size,
                 dropout=0.5,
                 num_splits=2):
        super().__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer

        self.m = input_size
        self.s = num_sources
        self.dmodel = dmodel
        self.ns = num_splits

        self.model_type = 'Transformer'
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(dmodel, dropout)
        encoder_layers = TransformerEncoderLayer(dmodel, num_heads,
                                                 hidden_size, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers,
                                                      num_layers)
        self.encoder = nn.Linear(2 * input_size, self.ns * dmodel)
        self.decoder = nn.Linear(dmodel, 2 * self.s * self.m // self.ns)

        self.init_weights()
Example #12
    def __init__(self,
                 n_enc_layer,
                 embed_size,
                 n_head,
                 intermediate_size,
                 max_seq_len=512,
                 dropout=0.1,
                 **kwargs):
        super(Transformer_Encoder_Extraction_Layer, self).__init__(**kwargs)
        assert embed_size % n_head == 0

        self.n_enc_layer = n_enc_layer
        self.embed_size = embed_size
        self.max_seq_len = max_seq_len
        self.n_head = n_head
        self.intermediate_size = intermediate_size
        self.dropout = dropout

        self.positional_encoder = Positional_Encoding_Layer(
            embed_size, max_seq_len=max_seq_len)
        transformer_encoder_layer = TransformerEncoderLayer(
            embed_size,
            n_head,
            dim_feedforward=intermediate_size,
            dropout=dropout)
        self.transformer_encoder = TransformerEncoder(
            transformer_encoder_layer, n_enc_layer)

        self._init_weights()
Example #13
    def __init__(self,
                 d_model: int = 512,
                 num_heads: int = 8,
                 num_encoder_layers: int = 6,
                 num_decoder_layers: int = 6,
                 units: int = 2048,
                 dropout: float = 0.1,
                 activation: str = "relu") -> None:
        """
        :param d_model: model depth, i.e. the word-embedding dimension
        :param num_heads: number of attention heads
        :param num_encoder_layers: number of encoder layers
        :param num_decoder_layers: number of decoder layers
        :param units: number of feed-forward units
        :param dropout: dropout rate
        :param activation: activation function
        """
        super(Transformer, self).__init__()

        encoder_layer = TransformerEncoderLayer(d_model, num_heads, units,
                                                dropout, activation)
        encoder_norm = torch.nn.LayerNorm(d_model)
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers,
                                          encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, num_heads, units,
                                                dropout, activation)
        decoder_norm = torch.nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers,
                                          decoder_norm)

        self._reset_parameters()

        self.d_model = d_model
        self.num_heads = num_heads
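
Example #13 calls self._reset_parameters() without showing it; nn.Transformer's own version is a one-pass Xavier init, roughly as follows (a sketch assuming the same convention):

    def _reset_parameters(self):
        # Xavier-initialise every weight matrix; leave biases and other 1-D params alone.
        for p in self.parameters():
            if p.dim() > 1:
                torch.nn.init.xavier_uniform_(p)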
Example #14
    def __init__(self,
                 ntoken,
                 ninp=128,
                 nhead=8,
                 nhid=128,
                 nlayers=8,
                 dropout=0.5):
        # ntoken: len(dictionary)
        # ninp: embedding dimension
        # nhead: number of heads in the multi-head attention
        # nhid: dimension of the feedforward network model
        # nlayers: number of nn.TransformerEncoderLayer layers
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        if not ntoken:
            self.ntoken = len(load_object("engDictAnn.pkl"))
        else:
            self.ntoken = ntoken
        self.model_type = "Transformer"
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(self.ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, self.ntoken)

        self.init_weights()
Example #15
 def __init__(self, vocab, nemb, nhead, nhid, nlayer, nclass, dropout=0.1):
     super(Transformer, self).__init__()
     try:
         from torch.nn import TransformerEncoder, TransformerEncoderLayer
     except ImportError:
         raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
     self.model_type = 'Transformer'
     self.ntoken = len(vocab)
     self.ninp = nemb
     self.nhead = nhead
     self.nhid = nhid
     self.nlayer = nlayer
     self.nclass = nclass
     self.dropout = dropout
     
     #self.encoder = nn.Embedding.from_pretrained(vocab.vectors, freeze=False)
     self.embedding = nn.Embedding(self.ntoken, self.ninp)
     #self.dropout1 = nn.Dropout(self.dropout)
     self.pos_encoder = PositionalEncoding(nemb, dropout)
     
     self.src_mask = None
     encoder_layers = TransformerEncoderLayer(self.ninp, self.nhead, self.nhid, self.dropout)
     self.transformer_encoder = TransformerEncoder(encoder_layers, self.nlayer)
     self.dropout2 = nn.Dropout(self.dropout)
     #self.pool = nn.AdaptiveMaxPool1d(1)
     self.pool = nn.AdaptiveAvgPool1d(1)
     self.decoder = nn.Linear(self.ninp, self.nclass)
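
Example #15 stops after declaring its layers; a hedged sketch of the forward pass its pooling layout implies, assuming the default (seq_len, batch, dim) tensor layout (this body is an assumption, not from the project):

 def forward(self, src):
     x = self.embedding(src)               # (seq_len, batch, nemb)
     x = self.pos_encoder(x)
     x = self.transformer_encoder(x, self.src_mask)
     x = self.dropout2(x)
     x = x.permute(1, 2, 0)                # (batch, nemb, seq_len) for 1d pooling
     x = self.pool(x).squeeze(-1)          # (batch, nemb)
     return self.decoder(x)                # (batch, nclass)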
Example #16
    def __init__(self,
                 ntoken,
                 ninp,
                 max_len,
                 nhead,
                 nhid,
                 nlayers,
                 nclasses,
                 cat_tokens,
                 cat_size,
                 cat_len,
                 dropout=0.5):
        super(TransformerModel, self).__init__()

        self.model_type = 'Transformer'

        # transformer layers
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)

        # embed extra categorical variables
        self.cat_embed = nn.Embedding(cat_tokens, cat_size)

        # final fc
        self.decoder = nn.Linear(ninp * max_len + cat_size * cat_len, nclasses)

        #dimensions
        self.ninp = ninp
        self.max_len = max_len
        self.cat_size = cat_size
        self.cat_len = cat_len

        self.init_weights()
Example #17
    def __init__(self, nuser, nloc, ntime, nreg, user_dim, loc_dim, time_dim, reg_dim, nhid, nhead_enc, nhead_dec, nlayers, dropout=0.5, **extra_config):
        super(LocPredictor, self).__init__()
        self.emb_user = embedding(nuser, user_dim, zeros_pad=True, scale=True)
        self.emb_loc = embedding(nloc, loc_dim, zeros_pad=True, scale=True)
        self.emb_reg = embedding(nreg, reg_dim, zeros_pad=True, scale=True)
        self.emb_time = embedding(ntime, time_dim, zeros_pad=True, scale=True)
        if not ((user_dim == loc_dim) and (user_dim == time_dim) and (user_dim == reg_dim)):
            raise Exception('user, location, time and region should have the same embedding size')
        ninp = user_dim
        pos_encoding = extra_config.get("position_encoding", "transformer")
        if pos_encoding == "embedding":
            self.pos_encoder = PositionalEmbedding(ninp, dropout)
        elif pos_encoding == "transformer":
            self.pos_encoder = PositionalEncoding(ninp, dropout)
        self.enc_layer = TransformerEncoderLayer(ninp, nhead_enc, nhid, dropout)
        self.encoder = TransformerEncoder(self.enc_layer, nlayers)
        if not extra_config.get("use_location_only", False):
            if extra_config.get("embedding_fusion", "multiply") == "concat":
                if extra_config.get("user_embedding", False):
                    self.lin = nn.Linear(user_dim + loc_dim + reg_dim + time_dim, ninp)
                else:
                    self.lin = nn.Linear(loc_dim + reg_dim + time_dim, ninp)

        ident_mat = torch.eye(ninp)
        self.register_buffer('ident_mat', ident_mat)

        self.layer_norm = nn.LayerNorm(ninp)
        self.extra_config = extra_config
        self.dropout = dropout
Example #18
    def __init__(self,
                 ntoken=549,
                 ninp=256,
                 nhead=2,
                 nhid=200,
                 nlayers=2,
                 dropout=0.1):
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        self.model_type = 'Transformer'
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.input_linear = nn.Linear(ntoken, ninp)
        self.ninp = ninp
        self.input_size = ntoken
        self.decoder_rnn = nn.GRU(ninp, nhid, batch_first=True)
        self.domain_classify_layer1 = nn.Linear(ninp, 64)
        self.domain_classify_layer2 = nn.Linear(64, 16)
        self.domain_classify_layer3 = nn.Linear(16, 8)

        self.linear1 = nn.Linear(ntoken, ninp)
        self.linear2 = nn.Linear(ninp, ninp)

        self.output_linear = nn.Linear(nhid, ntoken)

        self.init_weights()
Example #19
 def __init__(
     self,
     ntoken,
     ninp,
     nhead,
     nhid,
     nlayers,
     dropout=0.5,
     activation="relu",
     tie_weights=False,
 ):
     super(TransformerModel, self).__init__()
     try:
         from torch.nn import TransformerEncoder, TransformerEncoderLayer
     except ImportError:
         raise ImportError("TransformerEncoder module does not exist in "
                           "PyTorch 1.1 or lower.")
     self.model_type = "Transformer"
     self.src_mask = None
     self.pos_encoder = PositionalEncoding(ninp, dropout)
     encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout,
                                              activation)
     self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
     self.encoder = nn.Embedding(ntoken, ninp)
     self.ninp = ninp
     self.decoder = nn.Linear(ninp, ntoken)
     if tie_weights:
         if nhid != ninp:
             raise ValueError(
                 "When using the tied flag, nhid must be equal "
                 "to emsize.")
         self.decoder.weight = self.encoder.weight
     self.init_weights()
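
Example #19's init_weights() is not shown; in the tutorial code this class follows, it is a small uniform init (sketch, assuming the same ±0.1 range):

 def init_weights(self):
     initrange = 0.1
     self.encoder.weight.data.uniform_(-initrange, initrange)  # embedding table
     self.decoder.bias.data.zero_()
     self.decoder.weight.data.uniform_(-initrange, initrange)  # output projection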
Example #20
    def __init__(self, cfg: DictConfig, cwd: Path):
        super().__init__()

        #self.example_input_array = torch.rand(2, 1, 1024, 128)

        self.cfg = cfg
        self.dataset = cfg.dataset
        self.hparams = cfg.hparams
        self.cwd = cwd

        self.model_type = 'Transformer'
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(self.hparams["n_mels"], self.hparams["dropout"])

        encoder_layers = TransformerEncoderLayer(self.hparams["n_mels"],
                                                 self.hparams["n_head"],
                                                 self.hparams["n_hid"],
                                                 self.hparams["dropout"])
        self.encoder = TransformerEncoder(encoder_layers, self.hparams["n_layers"])

        decoder_layers = TransformerDecoderLayer(self.hparams["n_mels"],
                                                 self.hparams["n_head"],
                                                 self.hparams["n_hid"],
                                                 self.hparams["dropout"])
        self.decoder = TransformerDecoder(decoder_layers, self.hparams["n_layers"])
Example #21
    def __init__(
        self,
        feature_size,
        output_size,
        ninp,
        nhead,
        nhid,
        nlayers,
        dropout=0.5,
        condition_decoder=False,
        transformer_norm=False,
    ):
        """This model is built upon https://pytorch.org/tutorials/beginner/transformer_tutorial.html"""
        super(TransformerModel, self).__init__()
        self.model_type = "Transformer"
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)

        self.transformer_encoder = TransformerEncoder(
            encoder_layers,
            nlayers,
            norm=nn.LayerNorm(ninp) if transformer_norm else None,
        )
        self.encoder = nn.Linear(feature_size, ninp)
        self.ninp = ninp
        self.condition_decoder = condition_decoder
        self.decoder = nn.Linear(
            ninp + feature_size if condition_decoder else ninp, output_size
        )
        self.init_weights()
Example #22
    def __init__(self, src_vocab_size=128, tgt_vocab_size=128,
                 embedding_dim=128, fcn_hidden_dim=128,
                 num_heads=4, num_layers=2, dropout=0.2):
        super(Transformer, self).__init__()

        self.embedding_dim = embedding_dim
        # Source and Encoder layers
        self.src_embed = Embedding(src_vocab_size, embedding_dim, padding_idx=PAD_ID)
        self.src_pos_encoder = PositionalEncoding(embedding_dim)
        encoder_layer = TransformerEncoderLayer(d_model=embedding_dim, nhead=num_heads,
                                                   dim_feedforward=fcn_hidden_dim, dropout=dropout)
        encoder_norm = nn.LayerNorm(embedding_dim)
        self.encoder = TransformerEncoder(encoder_layer, num_layers, encoder_norm)

        # Target and Decoder layers
        self.tgt_embed = Embedding(tgt_vocab_size, embedding_dim, padding_idx=PAD_ID)
        self.tgt_pos_encoder = PositionalEncoding(embedding_dim)
        decoder_layer = nn.TransformerDecoderLayer(d_model=embedding_dim, nhead=num_heads,
                                                   dim_feedforward=fcn_hidden_dim, dropout=dropout)
        decoder_norm = nn.LayerNorm(embedding_dim)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers, decoder_norm)
        # Final linear layer
        self.final_out = nn.Linear(embedding_dim, tgt_vocab_size)

        # Initialize masks
        self.src_mask = None
        self.tgt_mask = None
        self.mem_mask = None
        # Initialize weights of model
        self._reset_parameters()
Example #23
File: model.py Project: jeanm/text
 def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
     super(BertModel, self).__init__()
     self.model_type = 'Transformer'
     self.bert_embed = BertEmbedding(ntoken, ninp)
     encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
     self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
     self.ninp = ninp
Example #24
    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer, TransformerDecoder, TransformerDecoderLayer
        except ImportError:
            raise ImportError('Use a newer version of PyTorch')
        self.model_type = 'transformer'
        self.src_mask = None
        self.ninp = ninp
        self.ntoken = ntoken
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        self.encoder = nn.Embedding(ntoken, ninp)

        encoder_norm = nn.LayerNorm(ninp)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers,
                                                      encoder_norm)

        #decoder_norm = nn.LayerNorm(ninp)
        #decoder_layers = TransformerDecoderLayer(ninp, nhead, nhid, dropout)
        #self.transformer_decoder = TransformerDecoder(decoder_layers, nlayers, decoder_norm)

        self.decoder = nn.Linear(ninp, ntoken)

        self.init_weights()
Example #25
    def __init__(self,
                 src_feature_size,
                 ntoken,
                 nhead,
                 nhid,
                 nlayers,
                 dropout=0.5,
                 decoder_d_model=2048):
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer, TransformerDecoder, TransformerDecoderLayer, Transformer
        self.encoder_d_model = src_feature_size
        self.decoder_d_model = decoder_d_model
        self.ntoken = ntoken
        self.model_type = 'Transformer'
        self.trg_mask = None
        self.encoder_pre = PreProcessing()
        self.enc_linear = nn.Linear(src_feature_size, decoder_d_model)
        self.pos_encoder = PositionalEncoding(self.decoder_d_model, dropout)
        encoder_layers = TransformerEncoderLayer(d_model=src_feature_size,
                                                 nhead=nhead,
                                                 dim_feedforward=nhid,
                                                 dropout=dropout)
        self.transformer_encoder = TransformerEncoder(
            encoder_layer=encoder_layers, num_layers=nlayers)

        self.embedder = Embedder(ntoken, decoder_d_model)
        self.pos_decoder = PositionalEncoding(decoder_d_model, dropout)
        decoder_layers = TransformerDecoderLayer(d_model=decoder_d_model,
                                                 nhead=nhead,
                                                 dim_feedforward=nhid,
                                                 dropout=dropout)
        self.transformer_decoder = TransformerDecoder(
            decoder_layer=decoder_layers, num_layers=nlayers)
        self.out = nn.Linear(decoder_d_model, ntoken)
Example #26
    def __init__(self, kg_graph_repr: Dict[str, np.ndarray], config: dict, id2e: tuple = None):

        super(self.__class__, self).__init__(kg_graph_repr, config)

        self.model_name = 'StarE_Transformer_Statement'
        self.hid_drop2 = config['STAREARGS']['HID_DROP2']
        self.feat_drop = config['STAREARGS']['FEAT_DROP']
        self.num_transformer_layers = config['STAREARGS']['T_LAYERS']
        self.num_heads = config['STAREARGS']['T_N_HEADS']
        self.num_hidden = config['STAREARGS']['T_HIDDEN']
        self.d_model = config['EMBEDDING_DIM']
        self.positional = config['STAREARGS']['POSITIONAL']

        self.object_mask_emb = torch.nn.Parameter(torch.randn(1, self.emb_dim, dtype=torch.float32), requires_grad=True)
        self.hidden_drop = torch.nn.Dropout(self.hid_drop)
        self.hidden_drop2 = torch.nn.Dropout(self.hid_drop2)
        self.feature_drop = torch.nn.Dropout(self.feat_drop)

        encoder_layers = TransformerEncoderLayer(self.d_model, self.num_heads, self.num_hidden, config['STAREARGS']['HID_DROP2'])
        self.encoder = TransformerEncoder(encoder_layers, config['STAREARGS']['T_LAYERS'])
        self.position_embeddings = nn.Embedding(config['MAX_QPAIRS'], self.d_model)
        self.layer_norm = torch.nn.LayerNorm(self.emb_dim)

        self.flat_sz = self.emb_dim * (config['MAX_QPAIRS'] - 1)
        self.fc = torch.nn.Linear(self.emb_dim, self.emb_dim)
Example #27
    def __init__(self, num_tokens, embedding_table, max_time_steps, text_dim, quat_dim, quat_channels,
                 offsets_dim, intended_emotion_dim, intended_polarity_dim, acting_task_dim,
                 gender_dim, age_dim, handedness_dim, native_tongue_dim, num_heads, num_hidden_units,
                 num_layers, dropout=0.5):
        super(T2GNet, self).__init__()
        self.T = max_time_steps
        self.text_dim = text_dim
        self.quat_channels = quat_channels
        self.text_mask = None
        self.quat_mask = None
        self.text_embedding = nn.Embedding.from_pretrained(embedding_table, freeze=True)
        self.text_pos_encoder = PositionalEncoding(text_dim, dropout)
        encoder_layers = TransformerEncoderLayer(text_dim, num_heads, num_hidden_units, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, num_layers)
        intermediate_dim = int((text_dim + quat_dim) / 2)
        self.text_embed = nn.Linear(text_dim + intended_emotion_dim + intended_polarity_dim +
                                    acting_task_dim + gender_dim + age_dim + handedness_dim +
                                    native_tongue_dim, intermediate_dim)
        self.text_offsets_to_gestures = nn.Linear(intermediate_dim + offsets_dim, quat_dim)

        self.quat_pos_encoder = PositionalEncoding(quat_dim, dropout)
        decoder_layers = TransformerDecoderLayer(quat_dim, num_heads, num_hidden_units, dropout)
        self.transformer_decoder = TransformerDecoder(decoder_layers, num_layers)
        self.temporal_smoothing = nn.ModuleList((
            nn.Conv1d(max_time_steps, max_time_steps, 3, padding=1),
            nn.Conv1d(max_time_steps, max_time_steps, 3, padding=1),
        ))
        self.decoder = nn.Linear(text_dim, num_tokens)

        self.init_weights()
Example #28
    def __init__(self, kg_graph_repr: Dict[str, np.ndarray], config: dict, id2e: tuple = None):
        if id2e is not None:
            super(self.__class__, self).__init__(kg_graph_repr, config, id2e[1])
        else:
            super(self.__class__, self).__init__(kg_graph_repr, config)

        self.model_name = 'StarE_Transformer_Statement'
        self.hid_drop2 = config['STAREARGS']['HID_DROP2']
        self.feat_drop = config['STAREARGS']['FEAT_DROP']
        self.num_transformer_layers = config['STAREARGS']['T_LAYERS']
        self.num_heads = config['STAREARGS']['T_N_HEADS']
        self.num_hidden = config['STAREARGS']['T_HIDDEN']
        self.d_model = config['EMBEDDING_DIM']
        self.positional = config['STAREARGS']['POSITIONAL']
        self.pooling = config['STAREARGS']['POOLING']  # min / avg / concat

        self.hidden_drop = torch.nn.Dropout(self.hid_drop)
        self.hidden_drop2 = torch.nn.Dropout(self.hid_drop2)
        self.feature_drop = torch.nn.Dropout(self.feat_drop)

        encoder_layers = TransformerEncoderLayer(self.d_model, self.num_heads, self.num_hidden, config['STAREARGS']['HID_DROP2'])
        self.encoder = TransformerEncoder(encoder_layers, config['STAREARGS']['T_LAYERS'])
        self.position_embeddings = nn.Embedding(config['MAX_QPAIRS'] - 1, self.d_model)
        self.layer_norm = torch.nn.LayerNorm(self.emb_dim)

        if self.pooling == "concat":
            self.flat_sz = self.emb_dim * (config['MAX_QPAIRS'] - 1)
            self.fc = torch.nn.Linear(self.flat_sz, self.emb_dim)
        else:
            self.fc = torch.nn.Linear(self.emb_dim, self.emb_dim)
Example #29
    def __init__(self,
                 ntoken,
                 ninp,
                 nhead,
                 nhid,
                 nlayers,
                 dropout=0.5,
                 norm=True,
                 sp=0.0,
                 activation='relu'):
        super(TransformerModel, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:
            raise ImportError(
                'TransformerEncoder module does not exist in PyTorch 1.1 or lower.'
            )
        self.model_type = 'Transformer'
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp,
                                                 nhead,
                                                 nhid,
                                                 dropout,
                                                 norm=norm,
                                                 sp=sp,
                                                 activation=activation)  #Yang
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)

        self.init_weights()
Example #30
    def __init__(self, config: dict):
        super().__init__(config)

        #self.emb_dim = config['EMBEDDING_DIM']
        self.entities = get_param((self.num_ent, self.emb_dim))
        self.relations = get_param((2 * self.num_rel, self.emb_dim))

        self.model_name = 'Transformer_Statement'
        self.hid_drop2 = config['STAREARGS']['HID_DROP2']
        self.feat_drop = config['STAREARGS']['FEAT_DROP']
        self.num_transformer_layers = config['STAREARGS']['T_LAYERS']
        self.num_heads = config['STAREARGS']['T_N_HEADS']
        self.num_hidden = config['STAREARGS']['T_HIDDEN']
        self.d_model = config['EMBEDDING_DIM']
        self.positional = config['STAREARGS']['POSITIONAL']

        self.pooling = config['STAREARGS']['POOLING']  # min / avg / concat
        self.device = config['DEVICE']

        self.hidden_drop = torch.nn.Dropout(self.hid_drop)
        self.hidden_drop2 = torch.nn.Dropout(self.hid_drop2)
        self.feature_drop = torch.nn.Dropout(self.feat_drop)

        encoder_layers = TransformerEncoderLayer(self.d_model, self.num_heads, self.num_hidden,
                                                 config['STAREARGS']['HID_DROP2'])
        self.encoder = TransformerEncoder(encoder_layers, config['STAREARGS']['T_LAYERS'])
        self.position_embeddings = nn.Embedding(config['MAX_QPAIRS'] - 1, self.d_model)
        self.layer_norm = torch.nn.LayerNorm(self.emb_dim)

        if self.pooling == "concat":
            self.flat_sz = self.emb_dim * (config['MAX_QPAIRS'] - 1)
            self.fc = torch.nn.Linear(self.flat_sz, self.emb_dim)
        else:
            self.fc = torch.nn.Linear(self.emb_dim, self.emb_dim)
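
The pooling flag in Examples #28 and #30 (min / avg / concat) decides how the (MAX_QPAIRS - 1) encoder outputs collapse into one vector before self.fc. A hedged sketch of that step (variable names assumed, not from the StarE source):

        # enc_out: (seq_len, batch, emb_dim) from self.encoder
        if self.pooling == "concat":
            x = enc_out.transpose(0, 1).reshape(enc_out.size(1), self.flat_sz)
        elif self.pooling == "min":
            x = enc_out.min(dim=0)[0]
        else:  # "avg"
            x = enc_out.mean(dim=0)
        x = self.fc(x)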