Example #1
    def __init__(self, num_conv1d_banks, num_highway_blocks, in_dims, out_dims,
                 activation):
        super(CBHG, self).__init__()

        self.num_highway_blocks = num_highway_blocks

        self.conv1d_banks = Conv1dBanks(num_conv1d_banks, in_dims, out_dims,
                                        activation)

        # kernel_size = 2 with stride 1, so pad by 1 (this yields one extra output frame)
        self.max_pool1d = MaxPool1d(kernel_size=2, stride=1, padding=1)

        self.projection1 = Conv1dNorm(in_dims=num_conv1d_banks * out_dims,
                                      out_dims=out_dims,
                                      kernel_size=3,
                                      activation_fn=activation)
        self.projection2 = Conv1dNorm(in_dims=out_dims,
                                      out_dims=out_dims,
                                      kernel_size=3,
                                      activation_fn=None)

        self.highway = HighwayNet(in_dims=out_dims)

        self.gru = GRU(out_dims,
                       out_dims,
                       batch_first=True,
                       bidirectional=True)
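
Note: with kernel_size=2, stride=1 and padding=1, MaxPool1d returns one frame more than it receives (T -> T + 1), which CBHG forwards typically trim back to T. A minimal sketch of that length change (shapes are illustrative, not taken from the module above):

import torch
from torch.nn import MaxPool1d

pool = MaxPool1d(kernel_size=2, stride=1, padding=1)
x = torch.randn(4, 80, 100)   # (batch, channels, T)
print(pool(x).shape)          # torch.Size([4, 80, 101]) -- one frame longer than the input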
Example #2
 def __init__(self,
              idim: int,
              hdim: int,
              nlayers: int = 1,
              enc_type: str = "blstm"):
     """
     This represents the computation that happens for 1 RNN Layer
     Uses packing,padding utils from Pytorch
     :param int input_dim- The input size of the RNN
     :param int hidden_dim- The hidden size of the RNN
     :param int nlayers- Number of RNN Layers
     :param str enc_type : Type of encoder- RNN/GRU/LSTM
     """
     super(RNNLayer, self).__init__()
     bidir = True if enc_type[0] == 'b' else False
     enc_type = enc_type[1:] if enc_type[0] == 'b' else enc_type
     if enc_type == "rnn":
         self.elayer = RNN(idim,
                           hdim,
                           nlayers,
                           batch_first=True,
                           bidirectional=bidir)
     elif enc_type == "lstm":
         self.elayer = LSTM(idim,
                            hdim,
                            nlayers,
                            batch_first=True,
                            bidirectional=bidir)
     else:
         self.elayer = GRU(idim,
                           hdim,
                           nlayers,
                           batch_first=True,
                           bidirectional=bidir)
Example #3
    def __init__(self,
                 num_nodes,
                 num_rels,
                 hidden_channels,
                 seq_len,
                 num_layers=1,
                 dropout=0.,
                 bias=True):
        super(RENet, self).__init__()

        self.num_nodes = num_nodes
        self.hidden_channels = hidden_channels
        self.num_rels = num_rels
        self.seq_len = seq_len
        self.dropout = dropout

        self.ent = Parameter(torch.Tensor(num_nodes, hidden_channels))
        self.rel = Parameter(torch.Tensor(num_rels, hidden_channels))

        self.sub_gru = GRU(3 * hidden_channels,
                           hidden_channels,
                           num_layers,
                           batch_first=True,
                           bias=bias)
        self.obj_gru = GRU(3 * hidden_channels,
                           hidden_channels,
                           num_layers,
                           batch_first=True,
                           bias=bias)

        self.sub_lin = Linear(3 * hidden_channels, num_nodes, bias=bias)
        self.obj_lin = Linear(3 * hidden_channels, num_nodes, bias=bias)

        self.reset_parameters()
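
self.ent and self.rel are allocated with torch.Tensor, i.e. uninitialized storage, so the module is unusable until reset_parameters() fills them. A hypothetical sketch of such an initializer (the actual RENet implementation may differ):

import math
import torch

def reset_parameters(self):
    # scale the uniform range by the hidden size, as is common for embedding-like Parameters
    bound = 1.0 / math.sqrt(self.hidden_channels)
    torch.nn.init.uniform_(self.ent, -bound, bound)
    torch.nn.init.uniform_(self.rel, -bound, bound)
    self.sub_gru.reset_parameters()
    self.obj_gru.reset_parameters()
    self.sub_lin.reset_parameters()
    self.obj_lin.reset_parameters()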
Example #4
 def __init__(self, dim=64, edge_dim=12, node_in=8, edge_in=19, edge_in3=8, num_layers=1):
     super(Net_int_2Edges_Set2Set3, self).__init__()
     self.num_layers = num_layers
     self.dim = dim
     self.lin_node = torch.nn.Linear(node_in, dim)
     self.lin_edge_attr = torch.nn.Linear(edge_in, edge_dim)
     
     nn1 = Linear(edge_dim, dim * dim, bias=False)
     nn2 = Linear(edge_in3, dim * dim * 2 * 2, bias=False)
     
     self.conv1 = NNConv(dim, dim, nn1, aggr='mean', root_weight=False)
     self.gru1 = GRU(dim, dim)
     self.lin_covert = Sequential(BatchNorm1d(dim), Linear(dim, dim * 2), RReLU(),
                                  Dropout(), Linear(dim * 2, dim * 2), RReLU())
     
     self.conv2 = NNConv(dim*2, dim*2, nn2, aggr='mean', root_weight=False)
     self.gru2 = GRU(dim*2, dim*2)
     
     self.lin_weight = Linear(edge_in3, dim*2*2, bias=False)
     self.lin_bias = Linear(edge_in3, 1, bias=False)
     
     self.norm = BatchNorm1d(dim*2*2)
     self.norm_x = BatchNorm1d(node_in)
     self.head = Set2Set(dim*2,processing_steps=3,num_layers=num_layers)
     self.pool = Set2Set(dim*2,processing_steps=3)
     
     self.h_lin = Linear(edge_in3,num_layers*dim*2*2)
     self.q_star_lin = Linear(dim*2*2,dim*2*2)
Example #5
    def __init__(self,
                 hidden_size=100,
                 num_layers=1,
                 num_roads=192,
                 prev_timesteps=6,
                 prediction_steps=6):
        super().__init__(name="Sequence2Sequence")

        self.prev_timesteps = prev_timesteps
        self.num_roads = num_roads
        self.prediction_steps = prediction_steps

        self.num_layers = num_layers
        self.hidden_size = hidden_size

        self.encoder = GRU(num_roads,
                           hidden_size,
                           batch_first=True,
                           num_layers=num_layers)

        self.decoder = GRU(num_roads,
                           hidden_size,
                           batch_first=True,
                           num_layers=num_layers)
        #self.activation = Sig()
        self.decoder_l1 = Linear(hidden_size, num_roads)

        self.criterion = L1Loss()
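
A minimal autoregressive rollout consistent with the encoder/decoder GRUs above; the variable names, the seeding of the decoder with the last observed frame, and all shapes are illustrative assumptions rather than the original forward:

import torch
from torch.nn import GRU, Linear

batch, prev_timesteps, num_roads, hidden_size = 8, 6, 192, 100
encoder = GRU(num_roads, hidden_size, batch_first=True, num_layers=1)
decoder = GRU(num_roads, hidden_size, batch_first=True, num_layers=1)
decoder_l1 = Linear(hidden_size, num_roads)

history = torch.randn(batch, prev_timesteps, num_roads)
_, hidden = encoder(history)             # hidden: (num_layers, batch, hidden_size)
step, outputs = history[:, -1:, :], []   # seed the decoder with the last observed frame
for _ in range(6):                       # prediction_steps
    out, hidden = decoder(step, hidden)  # out: (batch, 1, hidden_size)
    step = decoder_l1(out)               # project back to road space, feed in autoregressively
    outputs.append(step)
prediction = torch.cat(outputs, dim=1)   # (batch, prediction_steps, num_roads)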
Example #6
class Encoder(nn.Module):
    def __init__(self, input_dim, hidden_dim, bidirectional=False, embedding=None, cell='LSTM', num_layers=1):
        super(Encoder, self).__init__()
        if embedding == 'mlp':
            self.embedding = nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.Tanh())
            rnn_input = hidden_dim
        else:
            self.embedding = nn.Identity()
            rnn_input = input_dim
        
        if bidirectional:
            hidden_dim = hidden_dim//2

        if cell == 'LSTM':
            self.rnn = LSTM(input_size=rnn_input, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)
        elif cell == 'GRU':
            self.rnn = GRU(input_size=rnn_input, hidden_size=hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)

        # self.rnn.flatten_parameters()

    def forward(self, seq):
        if len(seq.shape)>3:
            seq = reshap_input_for_lstm(seq)
        seq = seq.float()
        # t, b, f = seq.shape
        b, t, f = seq.shape
        
        seq = self.embedding(seq.reshape([-1, f])).reshape([b, t, -1])

        self.rnn.flatten_parameters()
        seq, _ = self.rnn(seq)

        return seq
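
Quick shape check of the bidirectional halving above: hidden_dim is split across the two directions, so the concatenated GRU output keeps the requested width. The values below are illustrative and assume the imports used by the Encoder class (torch, torch.nn, GRU, LSTM) are in scope:

import torch

enc = Encoder(input_dim=32, hidden_dim=64, bidirectional=True, embedding='mlp', cell='GRU')
seq = torch.randn(4, 10, 32)   # (batch, time, features)
print(enc(seq).shape)          # torch.Size([4, 10, 64]) -- 2 directions x 32 units each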
Example #7
    def __init__(self,
                 embedding_size,
                 hidden_size,
                 output_size,
                 num_layers=1,
                 dropout=0.0,
                 length=20,
                 device=torch.device('cuda:1')):
        super(Decoder, self).__init__()
        self.embedding = Embedding(output_size, embedding_dim=embedding_size)
        self.gru = GRU(input_size=embedding_size,
                       hidden_size=hidden_size,
                       num_layers=num_layers,
                       dropout=dropout,
                       bidirectional=False)
        hidden_size_2 = 300
        self.output = Linear(in_features=hidden_size_2,
                             out_features=output_size)

        self.hidden2 = Linear(hidden_size, hidden_size_2)

        self.length = length
        self.output_size = output_size
        self.device = device
        #  do orthogonal initialization
        for name, param in self.gru.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
Example #8
    def __init__(self, n_steps_in, n_steps_out, n_features, n_cells):
        super(Net, self).__init__()
        self.n_steps_in = n_steps_in
        self.n_steps_out = n_steps_out
        self.n_features = n_features
        self.n_cells = n_cells

        self.gru0 = GRU(n_features,
                        self.n_cells,
                        4,
                        dropout=0.1,
                        bidirectional=True,
                        batch_first=True)
        self.gru1 = GRU(self.n_cells * 2,
                        self.n_cells,
                        3,
                        dropout=0.1,
                        bidirectional=True,
                        batch_first=True)
        self.gru2 = GRU(self.n_cells * 2,
                        self.n_cells,
                        3,
                        dropout=0.1,
                        bidirectional=True,
                        batch_first=True)

        self.linear1 = Linear(2 * n_cells, 30)
        self.linear2 = Linear(2 * n_cells, 30)

        self.linear1_2 = Linear(30, 1)
        self.linear2_2 = Linear(30, 1)

        self.relu = ReLU()
Example #9
class Decoder(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, bidirectional=False, cell='LSTM', num_layers=1):
        super(Decoder, self).__init__()
        
        rnn_hidden_dim = hidden_dim//2 if bidirectional else hidden_dim
        
        if cell == 'LSTM':
            self.rnn = LSTM(input_size=input_dim, hidden_size=rnn_hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)
        elif cell == 'GRU':
            self.rnn = GRU(input_size=input_dim, hidden_size=rnn_hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)

        rnn_hidden_output = rnn_hidden_dim * 2 if bidirectional else hidden_dim
        
        self.output_lin = nn.Linear(rnn_hidden_output, output_dim)
        # self.output_lin = nn.Sequential(nn.Linear(rnn_hidden_output, output_dim), nn.Tanh())

    def forward(self, seq):
        # seq = seq.permute(1, 0, 2).contiguous()  # t,b,h
        self.rnn.flatten_parameters()

        seq, _ = self.rnn(seq)
        
        # seq = seq.permute(1, 0, 2).contiguous()  # b,t,h
        
        # t, b, f = seq.shape
        b, t, f = seq.shape
        output = self.output_lin(seq.reshape([-1, f])).reshape([b, t, -1])
        return output
Example #10
    def __init__(self, cfg, **kwargs):
        super(Model, self).__init__()
        self.enc_hidden_sz = cfg.MODEL.ENC_HIDDEN_SZ
        self.ctx_hidden_sz = cfg.MODEL.CTX_HIDDEN_SZ
        self.dec_hidden_sz = cfg.MODEL.DEC_HIDDEN_SZ
        self.input_sz = cfg.MODEL.EMBED_SZ
        self.ctx_in_sz = cfg.MODEL.CTX_IN_SZ
        self.num_layers = cfg.MODEL.NUM_LAYERS

        self.embed = torch.nn.Embedding(kwargs['vocab_size'], cfg.MODEL.EMBED_SZ)

        self.enc = GRU(input_size=self.input_sz,
                       hidden_size=self.enc_hidden_sz,
                       num_layers=self.num_layers,
                       batch_first=True
                       )
        self.enc_to_ctx = nn.Linear(self.enc_hidden_sz, self.ctx_in_sz)
        self.ctx = GRU(input_size=self.ctx_in_sz,
                       hidden_size=self.ctx_hidden_sz,
                       num_layers=self.num_layers,
                       batch_first=True
                       )
        self.ctx_to_dec = nn.Linear(self.ctx_hidden_sz, self.dec_hidden_sz)
        self.dec = GRU(input_size=self.input_sz,
                       hidden_size=self.dec_hidden_sz,
                       num_layers=self.num_layers,
                       batch_first=True
                       )
Example #11
    def __init__(self, input_size, hidden_size, pad_idx,
                 vocab_size):
        super().__init__()

        self.embedding = Embedding(
            num_embeddings=vocab_size,
            embedding_dim=input_size,
            padding_idx=pad_idx)

        self.dropout = Dropout(p=0.1)

        self.merge = Linear(
            in_features=hidden_size * 2,
            out_features=hidden_size,
            bias=False)

        # creating rnn layer as module list so locked
        # dropout can be applied between each layer
        # NOTE: currently not using weight drop, because
        # it is incompatible with apex
        self.rnn = ModuleList([
            GRU(input_size=input_size,
                hidden_size=hidden_size,
                bidirectional=True,
                batch_first=True)] + [
            GRU(input_size=hidden_size,
                hidden_size=hidden_size,
                batch_first=True)
            for _ in range(2)
        ])
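
The ModuleList keeps each GRU separate so (locked) dropout can be applied between layers. A sketch of how a forward pass might walk it, with the merge projection collapsing the bidirectional first layer; the original forward is not shown, so this is an assumption about the shape flow:

def forward(self, ids):
    x = self.dropout(self.embedding(ids))  # (batch, time, input_size)
    for idx, rnn in enumerate(self.rnn):
        x, _ = rnn(x)
        if idx == 0:
            x = self.merge(x)              # 2 * hidden_size -> hidden_size
        x = self.dropout(x)
    return x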
Example #12
    def __init__(self,
                 sample_size,
                 conv_bank_max_filter_size=16,
                 conv_projections_channel_size=(128, 128),
                 num_highways=4):
        super(CBHG, self).__init__()
        self.sample_size = sample_size
        self.relu = ReLU()

        # conv_bank_max_filter_size sets of 1-D convolutional filters
        self.conv1d_banks = []
        for k in range(1, conv_bank_max_filter_size + 1):
            self.conv1d_banks.append(
                BatchNormConv1d(in_channels=sample_size,
                                out_channels=sample_size,
                                kernel_size=k,
                                stride=1,
                                padding=k // 2,
                                activation=self.relu))
        self.conv1d_banks = ModuleList(modules=self.conv1d_banks)

        # max pooling of conv bank (to increase local invariances)
        self.max_pool1d = MaxPool1d(kernel_size=2, stride=1, padding=1)

        out_features = [conv_bank_max_filter_size * sample_size
                        ] + list(conv_projections_channel_size[:-1])
        activations = [self.relu
                       ] * (len(conv_projections_channel_size) - 1) + [None]

        # conv1d projection layers
        self.conv1d_projections = []
        for (in_size, out_size, ac) in zip(out_features,
                                           conv_projections_channel_size,
                                           activations):
            self.conv1d_projections.append(
                BatchNormConv1d(in_channels=in_size,
                                out_channels=out_size,
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                activation=ac))
        self.conv1d_projections = ModuleList(modules=self.conv1d_projections)

        # Highway layers
        self.pre_highway = Linear(
            in_features=conv_projections_channel_size[-1],
            out_features=sample_size,
            bias=False)
        self.highways = ModuleList(modules=[
            Highway(in_size=sample_size, out_size=sample_size)
            for _ in range(num_highways)
        ])

        # bi-directional GRU layer
        self.gru = GRU(input_size=sample_size,
                       hidden_size=sample_size,
                       num_layers=1,
                       batch_first=True,
                       bidirectional=True)
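
The conv bank and projections work on (batch, channels, time) tensors while the batch_first GRU expects (batch, time, features), so a CBHG forward transposes between the two layouts. A shape-only illustration of the final GRU stage (values are illustrative):

import torch
from torch.nn import GRU

sample_size = 128
gru = GRU(input_size=sample_size, hidden_size=sample_size, num_layers=1,
          batch_first=True, bidirectional=True)
x = torch.randn(2, sample_size, 50)   # (batch, channels, time) as the conv layers see it
outputs, _ = gru(x.transpose(1, 2))   # the GRU wants (batch, time, features)
print(outputs.shape)                  # torch.Size([2, 50, 256]) -- 2 * sample_size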
Example #13
def gru_test(nsteps):
    gru_net = GRU(input_size=4, hidden_size=20, num_layers=3, batch_first=False, dropout=0.0, bidirectional=True)
    gru_net.cuda(device=0)

    with Profile("gru"):
        for i in range(nsteps):
            gru_func(max_length=450, seq_length=350, gru_net=gru_net)
    print(Profile.to_string())
Example #14
    def __init__(
        self,
        weight_file: str,
        uid_column: CategoricalColumnWithIdentity,
        iid_column: CategoricalColumnWithIdentity,
        pos_state_len_column: CategoricalColumnWithIdentity,
        pos_state_column: CategoricalColumnWithIdentity,
        pos_next_state_len_column: CategoricalColumnWithIdentity,
        pos_next_state_column: CategoricalColumnWithIdentity,
        neg_state_len_column: CategoricalColumnWithIdentity,
        neg_state_column: CategoricalColumnWithIdentity,
        neg_next_state_len_column: CategoricalColumnWithIdentity,
        neg_next_state_column: CategoricalColumnWithIdentity,
        rl_sample_column: CategoricalColumnWithIdentity,
        emb_size: int,
        hidden_size: int,
    ):
        super().__init__()
        self.emb_size = emb_size
        self.hidden_size = hidden_size
        self.uid_column = uid_column
        self.iid_column = iid_column
        self.pos_state_len_column = pos_state_len_column
        self.pos_state_column = pos_state_column
        self.pos_next_state_len_column = pos_next_state_len_column
        self.pos_next_state_column = pos_next_state_column
        self.neg_state_len_column = neg_state_len_column
        self.neg_state_column = neg_state_column
        self.neg_next_state_len_column = neg_next_state_len_column
        self.neg_next_state_column = neg_next_state_column
        self.rl_sample_column = rl_sample_column
        self.weight_file = weight_file

        self.i_embeddings = Embedding(self.iid_column.category_num,
                                      self.emb_size)

        self.pos_rnn = GRU(input_size=self.emb_size,
                           hidden_size=self.hidden_size,
                           batch_first=True)
        self.pos_mlp = MLP(self.hidden_size + self.emb_size,
                           [self.emb_size] * 3,
                           activation="relu",
                           dropout=0.2)

        self.neg_rnn = GRU(input_size=self.emb_size,
                           hidden_size=self.hidden_size,
                           batch_first=True)
        self.neg_mlp = MLP(self.hidden_size + self.emb_size,
                           [self.emb_size] * 3,
                           activation="relu",
                           dropout=0.2)

        self.mlp = Dense(self.emb_size * 2,
                         self.emb_size,
                         activation="relu",
                         dropout=0.2)
        self.prediction = Linear(self.emb_size, 1, bias=False)
Example #15
class Net(nn.Module):
    def __init__(self, large=True, lstm=True):
        super(Net, self).__init__()
        self.large = large
        self.lstm = lstm
        self.relu = nn.ReLU()
        if lstm:
            self.hidden = self.init_hidden()
            self.rnn = LSTM(input_size=FEATURES,
                            hidden_size=FRAMES,
                            num_layers=1,
                            batch_first=True)
        else:
            self.rnn = GRU(input_size=FEATURES,
                           hidden_size=FRAMES,
                           num_layers=1,
                           batch_first=True)

        if large:
            self.lin1 = nn.Linear(FRAMES**2, 26)
            self.lin2 = nn.Linear(26, 2)
        else:
            self.lin = nn.Linear(FRAMES**2, 2)

        self.softmax = nn.Softmax(dim=1)

    def init_hidden(self):
        h = Variable(torch.zeros(1, BATCH_SIZE, FRAMES))
        c = Variable(torch.zeros(1, BATCH_SIZE, FRAMES))

        if OBJ_CUDA:
            h = h.cuda()
            c = c.cuda()

        return h, c

    def forward(self, x):
        if OBJ_CUDA:
            self.rnn.flatten_parameters()

        # (batch, frames, features)
        if hasattr(self, 'lstm') and self.lstm:
            x, _ = self.rnn(x, self.hidden)
        else:
            x, _ = self.rnn(x)

        x = x.contiguous().view(-1, FRAMES**2)

        # (batch, units)
        if self.large:
            x = self.relu(self.lin1(x))
            x = self.lin2(x)
        else:
            x = self.lin(x)

        return self.softmax(x)
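
FRAMES, FEATURES, BATCH_SIZE and OBJ_CUDA are presumably module-level constants in the original source. The final view(-1, FRAMES**2) only works because the input has exactly FRAMES time steps, so the RNN output (batch, FRAMES, FRAMES) flattens cleanly. A quick check with assumed values:

import torch
from torch.nn import GRU

FRAMES, FEATURES, BATCH = 30, 24, 8   # assumed values, not the originals
rnn = GRU(input_size=FEATURES, hidden_size=FRAMES, num_layers=1, batch_first=True)
x = torch.randn(BATCH, FRAMES, FEATURES)
out, _ = rnn(x)
print(out.contiguous().view(-1, FRAMES ** 2).shape)  # torch.Size([8, 900])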
Example #16
class RNNLayer(torch.nn.Module):
    def __init__(self,
                 idim: int,
                 hdim: int,
                 nlayers: int = 1,
                 enc_type: str = "blstm"):
        """
        This represents the computation that happens for 1 RNN Layer
        Uses packing,padding utils from Pytorch
        :param int input_dim- The input size of the RNN
        :param int hidden_dim- The hidden size of the RNN
        :param int nlayers- Number of RNN Layers
        :param str enc_type : Type of encoder- RNN/GRU/LSTM
        """
        super(RNNLayer, self).__init__()
        bidir = True if enc_type[0] == 'b' else False
        enc_type = enc_type[1:] if enc_type[0] == 'b' else enc_type
        if enc_type == "rnn":
            self.elayer = RNN(idim,
                              hdim,
                              nlayers,
                              batch_first=True,
                              bidirectional=bidir)
        elif enc_type == "lstm":
            self.elayer = LSTM(idim,
                               hdim,
                               nlayers,
                               batch_first=True,
                               bidirectional=bidir)
        else:
            self.elayer = GRU(idim,
                              hdim,
                              nlayers,
                              batch_first=True,
                              bidirectional=bidir)

    def forward(self, x: torch.Tensor, inp_lens: torch.LongTensor):
        """
        Foward propogation for the RNNLayer
        :params torch.Tensor x - Input Features
        :params torch.LongTensor inp_lens - Input lengths without padding
        :returns torch.Tensor Encoded output
        :returns list Encoded output lengths
        :returns Encoder hidden state
        """
        total_length = x.size(1)
        packed_x = pack_padded_sequence(x, inp_lens, batch_first=True)
        self.elayer.flatten_parameters()
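        # NOTE: the unpacking below matches the LSTM branch of __init__; plain RNN/GRU
        # layers return (output, hidden) rather than (output, (hidden, cell)).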
        output, (hidden, _) = self.elayer(packed_x)
        unpacked_out, inp_lens = pad_packed_sequence(output,
                                                     batch_first=True,
                                                     total_length=total_length)
        return unpacked_out, inp_lens, hidden
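
Hypothetical usage of the RNNLayer above with its default "blstm" encoder; note that pack_padded_sequence with the default enforce_sorted=True expects the lengths in decreasing order. All values are illustrative:

import torch

layer = RNNLayer(idim=40, hdim=128, nlayers=2, enc_type="blstm")
x = torch.randn(3, 50, 40)          # (batch, time, features)
lens = torch.tensor([50, 42, 30])   # sorted in decreasing order
out, out_lens, hidden = layer(x, lens)
print(out.shape)                    # torch.Size([3, 50, 256]) -- 2 directions x hdim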
Example #17
    def __init__(self, input_dim, hidden_dim, output_dim, bidirectional=False, cell='LSTM', num_layers=1):
        super(Decoder, self).__init__()
        
        rnn_hidden_dim = hidden_dim//2 if bidirectional else hidden_dim
        
        if cell == 'LSTM':
            self.rnn = LSTM(input_size=input_dim, hidden_size=rnn_hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)
        elif cell == 'GRU':
            self.rnn = GRU(input_size=input_dim, hidden_size=rnn_hidden_dim, num_layers=num_layers, bidirectional=bidirectional, batch_first=True)

        rnn_hidden_output = rnn_hidden_dim * 2 if bidirectional else hidden_dim
        
        self.output_lin = nn.Linear(rnn_hidden_output, output_dim)
Example #18
    def __init__(self,
                 input_dim: int,
                 hidden_dim: int,
                 output_dim: int,
                 dropout_p: float) \
            -> None:
        """Encoder module.

        :param input_dim: Input dimensionality.
        :type input_dim: int
        :param hidden_dim: Hidden dimensionality.
        :type hidden_dim: int
        :param output_dim: Output dimensionality.
        :type output_dim: int
        :param dropout_p: Dropout.
        :type dropout_p: float
        """
        super(Encoder, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        self.dropout: Module = Dropout(p=dropout_p)

        self.gru_1: GRU = GRU(input_size=self.input_dim,
                              hidden_size=self.hidden_dim,
                              num_layers=1,
                              bias=True,
                              batch_first=True,
                              bidirectional=True)

        self.gru_2: GRU = GRU(input_size=self.hidden_dim * 2,
                              hidden_size=self.hidden_dim,
                              num_layers=1,
                              bias=True,
                              batch_first=True,
                              bidirectional=True)

        self.gru_3: GRU = GRU(input_size=self.hidden_dim * 2,
                              hidden_size=self.output_dim,
                              num_layers=1,
                              bias=True,
                              batch_first=True,
                              bidirectional=True)

        self.gru_1.flatten_parameters()
        self.gru_2.flatten_parameters()
        self.gru_3.flatten_parameters()
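
A forward sketch consistent with the three stacked bidirectional GRUs above: each layer consumes the concatenated 2 * hidden features of the previous one. The original forward is not shown, so the dropout placement is an assumption:

def forward(self, x):                   # x: (batch, time, input_dim)
    h = self.dropout(self.gru_1(x)[0])  # (batch, time, 2 * hidden_dim)
    h = self.dropout(self.gru_2(h)[0])  # (batch, time, 2 * hidden_dim)
    return self.gru_3(h)[0]             # (batch, time, 2 * output_dim)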
Example #19
    def _create_layers(self):

        self.recurrent_layer = GRU(input_size=self.in_channels,
                                   hidden_size=self.in_channels,
                                   num_layers=1)
        for param in self.recurrent_layer.parameters():
            param.requires_grad = True
            param.retain_grad()

        self.conv_layer = GCNConv_Fixed_W(in_channels=self.in_channels,
                                          out_channels=self.in_channels,
                                          improved=self.improved,
                                          cached=self.cached,
                                          normalize=self.normalize,
                                          add_self_loops=self.add_self_loops)
Example #20
 def __init__(self, dim, edge_dim, heads=4, time_step=3):
     super(Block, self).__init__()
     self.time_step = time_step
     self.conv = MultiHeadTripletAttention(
         dim, edge_dim)  # GraphMultiHeadAttention
     self.gru = GRU(dim, dim)
     self.ln = NodeLevelLayerNorm(dim)
Example #21
    def __init__(self, args, device):
        super(Net, self).__init__()

        self.args = args
        self.device = device

        node_dim = self.args.node_dim
        edge_dim = self.args.edge_dim
        hidden_dim = self.args.hidden_dim
        processing_steps = self.args.processing_steps
        self.depth = self.args.depth

        self.lin0 = torch.nn.Linear(node_dim, hidden_dim)
        nn = Sequential(Linear(edge_dim, hidden_dim * 2), ReLU(),
                        Linear(hidden_dim * 2, hidden_dim * hidden_dim))
        self.conv = NNConv(hidden_dim, hidden_dim, nn, aggr='mean')
        self.gru = GRU(hidden_dim, hidden_dim)

        self.set2set = Set2Set(hidden_dim, processing_steps=processing_steps)
        self.lin1 = torch.nn.Linear(2 * hidden_dim, hidden_dim)
        self.lin2 = torch.nn.Linear(hidden_dim, 1)

        self.lin3 = torch.nn.Linear(hidden_dim, 36)
        self.lin4 = torch.nn.Linear(36, 2)

        self.lin5 = torch.nn.Linear(hidden_dim, 36)
        self.lin6 = torch.nn.Linear(36, 2)

        self.apply(init_weights)
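
The NNConv + GRU pairing is the standard message-passing recurrence: each graph convolution produces one "time step" for the node-state GRU. A sketch of the usual update loop over self.depth steps (illustrative; this example omits the forward, and the lin3..lin6 heads are left out):

import torch.nn.functional as F

def forward(self, data):
    out = F.relu(self.lin0(data.x))           # (num_nodes, hidden_dim)
    h = out.unsqueeze(0)                      # (1, num_nodes, hidden_dim) initial GRU state
    for _ in range(self.depth):
        m = F.relu(self.conv(out, data.edge_index, data.edge_attr))
        out, h = self.gru(m.unsqueeze(0), h)  # one recurrent step across all nodes
        out = out.squeeze(0)
    pooled = self.set2set(out, data.batch)    # (num_graphs, 2 * hidden_dim)
    return self.lin2(F.relu(self.lin1(pooled)))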
Example #22
    def __init__(self, is_gru, input_size, hidden_size, bidirectional,
                 stacked_layers):
        """
        :param is_gru: GRU cell type if true, otherwise LSTM
        :param input_size: the size of the tensors that will be used as input (embeddings or projected embeddings)
        :param hidden_size: the size of the cell
        :param bidirectional: boolean
        :param stacked_layers: the number of stacked layers
        """
        super(CellLayer, self).__init__()
        if is_gru:
            self.cell = GRU(input_size=input_size,
                            hidden_size=hidden_size,
                            batch_first=True,
                            bidirectional=bidirectional,
                            num_layers=stacked_layers)

        else:
            self.cell = LSTM(input_size=input_size,
                             hidden_size=hidden_size,
                             batch_first=True,
                             bidirectional=bidirectional,
                             num_layers=stacked_layers)

        self._output_size = hidden_size * 2 if bidirectional else hidden_size
        self._input_size = input_size
Example #23
def build_encoder(args, vocab):
    """Builds the encoder to params."""

    input_size = len(vocab.source)
    rnn_layer = None
    bidirectional = args.encoder_mode == 'bigru'
    dropout = args.rnn_dropout if args.encoder_layers != 1 else 0

    if args.encoder_mode == 'rnn':
        rnn_layer = RNN(args.hidden_size,
                        args.hidden_size,
                        num_layers=args.encoder_layers,
                        dropout=dropout,
                        batch_first=True)
    elif args.encoder_mode == 'gru' or args.encoder_mode == 'bigru':
        rnn_layer = GRU(args.hidden_size,
                        args.hidden_size,
                        num_layers=args.encoder_layers,
                        dropout=dropout,
                        bidirectional=bidirectional,
                        batch_first=True)
    else:
        raise ValueError('Invalid encoder mode: %s' % (args.encoder_mode))

    return Encoder(input_size,
                   args.hidden_size,
                   rnn_layer,
                   bidirectional=bidirectional)
Example #24
 def __init__(
     self,
     in_dim=15,
     out_dim=64,
     edge_in_dim=7,
 ):
     super(ResidualMessagePassingBlock, self).__init__()
     edge_nn = Sequential(Linear(edge_in_dim, 64), ReLU(),
                          Linear(64, in_dim * in_dim))
     self.mp = NNConv(in_dim,
                      in_dim,
                      edge_nn,
                      aggr='mean',
                      root_weight=True)
     edge_nn2 = Sequential(Linear(edge_in_dim, 64), ReLU(),
                           Linear(64, in_dim * in_dim))
     self.dmp = DirectedMessagePassing(in_dim,
                                       in_dim,
                                       edge_nn2,
                                       aggr='mean',
                                       root_weight=True)
     self.fu = GRU(in_dim, in_dim)
     self.lin = torch.nn.Linear(in_dim, out_dim)
     self.bn = NodeLevelBatchNorm(num_features=out_dim)
     self.relu = torch.nn.ReLU()
     self.short_cut = Sequential(nn.Linear(in_dim, out_dim))
Example #25
 def __init__(self,
              input_dim=129,
              filter_dim=20,
              n_channels=1,
              hidden_layers=64,
              bidir=True,
              dropout=0.25,
              dropout2d=0.):
     """
     input_dim (int): size of the original filter bank
     filter_dim (int): size of the fitted filter bank
     n_channels (int): number of distinct signals (i.e. number of channels in the time-frequency representation)
     context_size (int): context size of the attention module
     hidden_layers (int): Hidden layers in the GRU module
     bidir  (bool): Use bidirectionnal gru ?
     """
     super().__init__()
     self.dropout = nn.Dropout(dropout)
     self.dropout2D = nn.Dropout2d(dropout2d)
     self.hidden_size = hidden_layers
     self.num_layers = 1
     self.filter_bank = FilterBankLayer(input_dim, filter_dim, n_channels)
     self.gru = GRU(bidirectional=bidir,
                    input_size=filter_dim * n_channels,
                    num_layers=self.num_layers,
                    batch_first=True,
                    hidden_size=self.hidden_size)
Example #26
 def __init__(self, dim=64, edge_dim=12, node_in=8, edge_in=19, edge_in3=8):
     super(Net_int_2Edges_attention, self).__init__()
     self.lin_node = torch.nn.Linear(node_in, dim)
     
     self.conv1 = GATConv(dim, dim, negative_slope=0.2, dropout=0.1, bias=True)
     self.gru1 = GRU(dim, dim)
     self.lin_covert = Sequential(BatchNorm1d(dim), Linear(dim, dim * 2), RReLU(),
                                  Dropout(), Linear(dim * 2, dim * 2), RReLU())
     
     self.conv2 = GATConv(dim*2, dim*2, negative_slope=0.2, dropout=0.1, bias=True)
     self.gru2 = GRU(dim*2, dim*2)
     
     self.lin_weight = Linear(8, dim*3*2, bias=False)
     self.lin_bias = Linear(8, 1, bias=False)
     self.norm = BatchNorm1d(dim*3*2)
     self.norm_x = BatchNorm1d(node_in)
Example #27
    def __init__(
        self,
        weight_file: str,
        iid_column: CategoricalColumnWithIdentity,
        state_len_column: CategoricalColumnWithIdentity,
        state_column: CategoricalColumnWithIdentity,
        next_state_len_column: CategoricalColumnWithIdentity,
        next_state_column: CategoricalColumnWithIdentity,
        rl_sample_column: CategoricalColumnWithIdentity,
        emb_size: int,
        hidden_size: int,
    ):
        super().__init__()
        self.emb_size = emb_size
        self.hidden_size = hidden_size
        self.iid_column = iid_column
        self.state_len_column = state_len_column
        self.state_column = state_column
        self.next_state_len_column = next_state_len_column
        self.next_state_column = next_state_column
        self.rl_sample_column = rl_sample_column
        self.weight_file = weight_file

        self.i_embedding = Embedding(self.iid_column.category_num,
                                     self.emb_size)
        self.rnn = GRU(input_size=self.emb_size,
                       hidden_size=self.hidden_size,
                       batch_first=True)
        self.out = Linear(self.hidden_size, self.emb_size)
Example #28
    def __init__(self, vocabulary_size, sos_token, eos_token, pad_token, attention_size=default_attention['size'],
                 embedding_size=default_embedding['size'], hidden_size=default_gru['hidden_size'],
                 num_layers=default_gru['num_layers'], dropout=default_gru['dropout'], shift_focus=True):
        super().__init__()
        self.attention_size = attention_size
        self.vocabulary_size = vocabulary_size

        self.embedding = Embedding(vocabulary_size, embedding_size, padding_idx=0)
        self.gru1 = GRU(2 * hidden_size + embedding_size, hidden_size, num_layers=num_layers, batch_first=True,
                        dropout=dropout, bidirectional=True)
        self.gru2 = GRU(2 * hidden_size + embedding_size, hidden_size, num_layers=num_layers, batch_first=True,
                        dropout=dropout, bidirectional=True)
        self.decoder = EmptyDecoderAndPointer(vocabulary_size, embedding_size, hidden_size, attention_size, pad_token,
                                              shift_focus=shift_focus)
        self.sos_token = sos_token
        self.eos_token = eos_token
Example #29
    def __init__(self,
                 vocab_size,
                 emb_dim,
                 hidden_size,
                 weight,
                 kqv_dim,
                 rnn_type='gru',
                 bidirectional=False,
                 batch_first=False,
                 padding_idx=None):
        super(ZXOTextEncoder, self).__init__()
        self.embed = nn.Embedding(vocab_size,
                                  embedding_dim=emb_dim,
                                  _weight=weight)
        if rnn_type == 'rnn':
            self.rnn = RNN(emb_dim,
                           hidden_size,
                           bidirectional=bidirectional,
                           num_layers=6,
                           batch_first=batch_first)
        elif rnn_type == 'gru':
            self.rnn = GRU(emb_dim,
                           hidden_size,
                           bidirectional=bidirectional,
                           num_layers=6,
                           batch_first=batch_first)
        elif rnn_type == 'lstm':
            self.rnn = LSTM(emb_dim,
                            hidden_size,
                            bidirectional=bidirectional,
                            num_layers=6,
                            batch_first=batch_first)

        self.attn = Attn(emb_dim, kqv_dim)
        self.linear = nn.Linear(emb_dim, 2)
Example #30
    def __init__(self):
        super(Net, self).__init__()
        self.lin0 = torch.nn.Linear(dataset.num_features, dim)

        nn0 = Sequential(Linear(2, 64), ReLU(), Linear(64, 2*dim*dim))
        nn = Sequential(Linear(2, 64), ReLU(), Linear(64, dim * dim))
        #nn = Sequential(Linear(5, dim * dim))
        self.conv0 = NNConv(dim, 2*dim, nn0, aggr='mean')
        self.gru0 = GRU(2*dim, dim)

        self.conv = NNConv(dim, dim, nn, aggr='mean')
        self.gru = GRU(dim, dim)

        self.set2set = Set2Set(dim, processing_steps=1)
        self.lin1 = torch.nn.Linear(2 * dim, dim)
        self.lin2 = torch.nn.Linear(dim, 1)