Example #1
0
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.5,
                 GPUs=1):
        """Build the network: a stacked GRU encoder followed by a linear head.

        Args:
            input_dim: Size of each input feature vector.
            hidden_dim: GRU hidden-state size (also the output head's input size).
            output_dim: Size of the final linear projection.
            batch_size: Stored on the instance; not used in construction.
            n_layers: Number of stacked GRU layers.
            backwards: Look-back window length; only used to derive inp_width.
            drop_prob: Probability for the self.drop Dropout module.
            GPUs: GPU count intended by the caller; stored only.
        """
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        # Flattened width of one full look-back window (backwards steps x features).
        self.inp_width = int(self.backwards * self.input_dim)

        # NOTE(review): batch_first is not set, so this GRU expects input shaped
        # (seq, batch, feature) — confirm against the forward() implementation.
        self.GRU = GRU(input_size=input_dim,
                       hidden_size=hidden_dim,
                       num_layers=n_layers)

        self.out_lin = nn.Linear(hidden_dim, output_dim)
Example #2
0
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.10,
                 GPUs=1):
        """Build the network: input projection, a stack of self-attention
        layers with layer norm, a GRU, a Deepnet block, and a 3-layer MLP head.

        Args:
            input_dim: Size of each input feature vector.
            hidden_dim: Width of the attention/GRU/Deepnet hidden layers;
                must be divisible by 4 (attention head count is hidden_dim // 4).
            output_dim: Size of the final linear projection.
            batch_size: Stored on the instance; not used in construction.
            n_layers: Number of stacked GRU layers.
            backwards: Look-back window length; fc1 consumes the flattened
                (backwards * hidden_dim) sequence output.
            drop_prob: Dropout probability (self.drop, GRU inter-layer
                dropout, and Deepnet droprate).
            GPUs: GPU count intended by the caller; stored only.
        """
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob
        self.attn_layers = 6

        self.drop = nn.Dropout(p=drop_prob)

        self.lin_in = nn.Linear(input_dim, hidden_dim)

        self.attn_list = nn.ModuleList()
        self.layernorm = nn.ModuleList()
        for _ in range(self.attn_layers):
            # hidden_dim // 4 heads of dimension 4 each; MultiheadAttention
            # requires embed_dim % num_heads == 0, i.e. hidden_dim % 4 == 0.
            self.attn_list.append(
                torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4))
            self.layernorm.append(nn.LayerNorm(hidden_dim))

        # BUG FIX: drop_prob was previously passed positionally into nn.GRU's
        # 4th parameter, which is `bias` — so no dropout was ever applied.
        # Pass it via the `dropout` keyword. (GRU dropout only acts between
        # stacked layers; PyTorch warns if n_layers == 1.)
        self.gru1 = GRU(input_dim,
                        hidden_dim,
                        n_layers,
                        dropout=drop_prob,
                        batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.hidden_dim,
                               nested=0,
                               droprate=drop_prob)
        self.fc1 = nn.Linear(backwards * hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()
Example #3
0
    def __init__(self, input_dim, hidden_dim, output_dim, batch_size, n_layers, backwards, drop_prob=0.10, GPUs=1):
        """Build the network: three multi-head attention modules, a GRU,
        a Deepnet block, and a 3-layer MLP head.

        Args:
            input_dim: Size of each input feature vector; mha1 uses input_dim
                heads of dimension 1 over an input_dim-wide embedding.
            hidden_dim: Width of the GRU/attention/Deepnet hidden layers;
                must be divisible by 4 (mha2/mha3 use hidden_dim // 4 heads).
            output_dim: Size of the final linear projection.
            batch_size: Stored on the instance; not used in construction.
            n_layers: Number of stacked GRU layers.
            backwards: Look-back window length; fc1 consumes a flattened
                (backwards * input_dim) vector.
            drop_prob: Dropout probability (self.drop, GRU inter-layer
                dropout, and Deepnet droprate).
            GPUs: GPU count intended by the caller; stored only.
        """
        super(GRUNet, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers

        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)

        self.mha1 = torch.nn.MultiheadAttention(input_dim, input_dim)
        self.mha2 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4)
        self.mha3 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4)

        # BUG FIX: drop_prob was previously passed positionally into nn.GRU's
        # 4th parameter, which is `bias` — so no dropout was ever applied.
        # Pass it via the `dropout` keyword. (GRU dropout only acts between
        # stacked layers; PyTorch warns if n_layers == 1.)
        self.gru1 = GRU(input_dim, hidden_dim, n_layers, dropout=drop_prob, batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim],
                                [hidden_dim, hidden_dim, hidden_dim]],
                               self.hidden_dim, nested=0, droprate=drop_prob)
        # NOTE(review): fc1 expects backwards*input_dim features while the GRU
        # emits hidden_dim per step — presumably fc1 is fed the raw flattened
        # input in forward(); confirm against the forward() implementation.
        self.fc1 = nn.Linear(backwards * input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()