Example #1
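This and the five examples that follow assume the same surroundings: torch, torch.nn as nn, and two project-local modules, GRU and Deepnet, whose definitions the listing does not include. A minimal preamble that lets the snippets parse is sketched below; the custom-module import path is a hypothetical placeholder, inferred only from how the classes are called.

import torch
import torch.nn as nn

# Hypothetical project-local import. GRU is evidently a custom wrapper,
# not nn.GRU -- the examples call GRU(...).init_hidden(...), which nn.GRU
# does not provide -- and Deepnet is an MLP-style module not shown here.
# from layers import GRU, Deepnet
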
class GRUNet(nn.Module):
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.5,
                 GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        self.inp_width = int(self.backwards * self.input_dim)

        self.GRU = GRU(input_size=input_dim,
                       hidden_size=hidden_dim,
                       num_layers=n_layers)
        '''
        self.r1_dnet = Deepnet(self.inp_width,
                               [[hidden_dim, hidden_dim, hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.inp_width, nested=2, droprate=drop_prob)
        '''

        self.out_lin = nn.Linear(hidden_dim, output_dim)
Example #2
class GRUNet(nn.Module):
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.10,
                 GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.attn_layers = 6

        self.drop = nn.Dropout(p=drop_prob)

        self.lin_in = nn.Linear(input_dim, hidden_dim)

        self.attn_list = nn.ModuleList()
        self.layernorm = nn.ModuleList()
        for i in range(self.attn_layers):
            self.attn_list.append(
                torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4))
            self.layernorm.append(nn.LayerNorm(hidden_dim))

        self.gru1 = GRU(input_dim,
                        hidden_dim,
                        n_layers,
                        drop_prob,
                        batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.hidden_dim,
                               nested=0,
                               droprate=drop_prob)
        self.fc1 = nn.Linear(backwards * hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()
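
One constraint worth noting for this variant: nn.MultiheadAttention(embed_dim, num_heads) requires embed_dim to be divisible by num_heads, so hidden_dim // 4 heads only works for hidden sizes where the division comes out even (hidden_dim = 9, for instance, gives 2 heads and fails). A quick standalone check with a placeholder hidden size:

import torch

hidden_dim = 64                                  # placeholder size
num_heads = hidden_dim // 4                      # 16 heads of dimension 4
assert hidden_dim % num_heads == 0               # required by MultiheadAttention
mha = torch.nn.MultiheadAttention(hidden_dim, num_heads)

# Default layout is (seq_len, batch, embed_dim).
x = torch.randn(8, 32, hidden_dim)               # 8 steps, batch of 32
out, attn_weights = mha(x, x, x)                 # self-attention
print(out.shape)                                 # torch.Size([8, 32, 64])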
Example #3
class GRUNet(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, batch_size, n_layers, backwards, drop_prob=0.10, GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        '''
        self.conv1 = nn.Conv1d(backwards,backwards*3,kernel_size=1,stride=1,padding=0)
        #b*3, i
        self.conv2 = nn.Conv2d(1,10,kernel_size=(7,3),stride=1,padding=(3,1))
        #10,b*3,i
        self.pool1 = nn.MaxPool2d((7,3),stride=(1,3),padding=(3,0))
        #10,b*3,i/3
        self.redu1 = nn.Conv2d(10,1,kernel_size=1,stride=1,padding=0)
        #1,b*3,i/3
        '''
        self.mha1 = torch.nn.MultiheadAttention(input_dim, input_dim)
        self.mha2 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim//4)
        self.mha3 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim//4)

        self.gru1 = GRU(input_dim, hidden_dim, n_layers, drop_prob, batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.hidden_dim, nested=0, droprate=drop_prob)
        self.fc1 = nn.Linear(backwards*input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()
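
The commented-out convolutional front end carries running shape annotations (b = backwards, i = input_dim), and they do check out. A standalone sketch with placeholder sizes, assuming input_dim is divisible by 3 so the pooled width is exact:

import torch
import torch.nn as nn

backwards, input_dim, bs = 8, 12, 4                       # placeholder sizes
conv1 = nn.Conv1d(backwards, backwards * 3, kernel_size=1)
conv2 = nn.Conv2d(1, 10, kernel_size=(7, 3), stride=1, padding=(3, 1))
pool1 = nn.MaxPool2d((7, 3), stride=(1, 3), padding=(3, 0))
redu1 = nn.Conv2d(10, 1, kernel_size=1)

x = torch.randn(bs, backwards, input_dim)
out = conv1(x)                                            # (bs, b*3, i)
out = conv2(out.view(bs, 1, backwards * 3, input_dim))    # (bs, 10, b*3, i)
out = pool1(out)                                          # (bs, 10, b*3, i/3)
out = redu1(out).view(bs, backwards * 3, input_dim // 3)  # (bs, b*3, i/3)
print(out.shape)                                          # torch.Size([4, 24, 4])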
Example #4
class GRUNet(nn.Module):
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.5,
                 GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        self.inp_width = int(self.backwards * self.input_dim)

        self.GRU = GRU(input_size=input_dim,
                       hidden_size=hidden_dim,
                       num_layers=n_layers)
        '''
        self.r1_dnet = Deepnet(self.inp_width,
                               [[hidden_dim, hidden_dim, hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.inp_width, nested=2, droprate=drop_prob)
        '''

        self.out_lin = nn.Linear(hidden_dim, output_dim)

    def init_hidden(self, device):
        h = self.GRU.init_hidden(device)
        return h

    def forward(self, x, h):
        bs = x.shape[0]
        #out = x.view(bs,self.inp_width)

        #out = self.r1_dnet(self.drop(out))

        #out = self.drop(out)

        out, h = self.GRU(x, h)
        out = self.out_lin(out[:, -1])
        return out, h
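
Because the Deepnet branch in this variant is commented out, the Example #4 class can be exercised with nothing beyond a stand-in for the custom GRU wrapper. The stub below is an assumption, reconstructed from the call sites (keyword construction plus an init_hidden(device) method); the real project class may differ, in particular in how it learns the batch size.

import torch
import torch.nn as nn

class GRU(nn.Module):
    """Hypothetical stand-in for the project's GRU wrapper."""
    def __init__(self, input_size, hidden_size, num_layers, batch_size=32):
        super().__init__()
        self.rnn = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.batch_size = batch_size          # assumed default; not in the listing

    def init_hidden(self, device):
        # One zero state per layer: (num_layers, batch, hidden_size).
        return torch.zeros(self.num_layers, self.batch_size,
                           self.hidden_size, device=device)

    def forward(self, x, h):
        return self.rnn(x, h)

net = GRUNet(input_dim=16, hidden_dim=64, output_dim=1,
             batch_size=32, n_layers=2, backwards=8)
h = net.init_hidden(torch.device("cpu"))
x = torch.randn(32, 8, 16)                    # (batch, backwards, input_dim)
out, h = net(x, h)
print(out.shape)                              # torch.Size([32, 1])

With this stub the forward pass reads a window of backwards steps and, via out[:, -1], emits one prediction per sequence from the last time step.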
Example #5
class GRUNet(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, batch_size, n_layers, backwards, drop_prob=0.10, GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        '''
        self.conv1 = nn.Conv1d(backwards,backwards*3,kernel_size=1,stride=1,padding=0)
        #b*3, i
        self.conv2 = nn.Conv2d(1,10,kernel_size=(7,3),stride=1,padding=(3,1))
        #10,b*3,i
        self.pool1 = nn.MaxPool2d((7,3),stride=(1,3),padding=(3,0))
        #10,b*3,i/3
        self.redu1 = nn.Conv2d(10,1,kernel_size=1,stride=1,padding=0)
        #1,b*3,i/3
        '''
        self.mha1 = torch.nn.MultiheadAttention(input_dim, input_dim)
        self.mha2 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim//4)
        self.mha3 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim//4)

        self.gru1 = GRU(input_dim, hidden_dim, n_layers, drop_prob, batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.hidden_dim, nested=0, droprate=drop_prob)
        self.fc1 = nn.Linear(backwards*input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()

    def init_hidden(self, batch_size,device):
        hidden = self.gru1.init_hidden(batch_size,device)
        return hidden

    def forward(self, x, h):
        bs = x.shape[0]
        #out = self.conv1(x)
        #out = self.relu(self.conv2(out.view(bs,1,self.backwards*3,self.input_dim)))
        #out = self.pool1(out)
        #out = self.redu1(out).view(bs,self.backwards*3,int(self.input_dim/3))
        #out = self.relu(out)
        out = x.transpose(0,1)
        out,attn = self.mha1(out,out,out)
        out = out.transpose(0, 1)
        out = self.relu(self.fc1(out.reshape(bs, self.backwards * self.input_dim)))
        out = self.drop(out)
        out = self.r1_dnet(out)
        out = self.relu(self.fc2(out))
        out = self.fc3(out)
        return out, h
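
The transpose pair around mha1 exists because nn.MultiheadAttention defaults to a (seq_len, batch, embed_dim) layout while the rest of the forward pass is batch-first. On PyTorch 1.9 and later the same computation can be written without that bookkeeping by passing batch_first=True; a minimal sketch with placeholder sizes:

import torch

input_dim, backwards, bs = 16, 8, 4              # placeholder sizes
x = torch.randn(bs, backwards, input_dim)        # batch-first, as in forward()

# PyTorch >= 1.9: batch_first=True removes the transpose(0, 1) pair.
mha = torch.nn.MultiheadAttention(input_dim, input_dim, batch_first=True)
out, attn = mha(x, x, x)
print(out.shape)                                 # torch.Size([4, 8, 16])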
Example #6
class GRUNet(nn.Module):
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.40,
                 GPUs=1):
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.attn_layers = 6

        self.drop = nn.Dropout(p=drop_prob)

        self.lin_in = nn.Linear(input_dim, hidden_dim)

        self.attn_list = nn.ModuleList()
        self.layernorm = nn.ModuleList()
        for i in range(self.attn_layers):
            self.attn_list.append(torch.nn.MultiheadAttention(hidden_dim, 8))
            self.layernorm.append(nn.LayerNorm(hidden_dim))

        self.gru1 = GRU(input_dim,
                        hidden_dim,
                        n_layers,
                        drop_prob,
                        batch_first=True)

        self.r1_dnet = Deepnet(
            backwards * hidden_dim,
            [[hidden_dim, hidden_dim], [hidden_dim, hidden_dim],
             [hidden_dim, hidden_dim], [hidden_dim, hidden_dim]],
            self.hidden_dim,
            nested=0,
            droprate=drop_prob)
        self.fc1 = nn.Linear(backwards * hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()

    def init_hidden(self, batch_size, device):
        hidden = self.gru1.init_hidden(batch_size, device)
        return hidden

    def forward(self, x, h):
        bs = x.shape[0]

        out = self.relu(self.lin_in(x))  #input->hidden dim encoder
        out = self.drop(out)
        out = out.transpose(0, 1)  #mha1 batch transpose
        for i in range(len(self.attn_list)):
            tmp, attn = self.attn_list[i](out, out, out)
            #out = torch.layer_norm(torch.add(tmp, out), tmp.shape)
            out = self.layernorm[i](torch.add(tmp, out))
        out = out.transpose(0, 1)
        out = out.reshape(bs, self.backwards * self.hidden_dim)
        #out = self.relu( self.fc1(out))
        #out = self.drop(out)
        out = self.r1_dnet(out)
        out = self.fc3(out)
        return out, h
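
Each pass through the attention loop is a post-norm residual sublayer: self-attention, residual add, then a learned LayerNorm (the commented torch.layer_norm line was a parameter-free version of the same step). Isolated with placeholder sizes, one iteration looks like this:

import torch
import torch.nn as nn

hidden_dim, n_heads = 64, 8                      # placeholders matching the snippet
mha = nn.MultiheadAttention(hidden_dim, n_heads)
norm = nn.LayerNorm(hidden_dim)

out = torch.randn(10, 4, hidden_dim)             # (seq_len, batch, embed_dim)
tmp, attn = mha(out, out, out)                   # self-attention
out = norm(tmp + out)                            # residual add, then LayerNorm
print(out.shape)                                 # torch.Size([10, 4, 64])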