Example no. 1
0
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 n_layers,
                 backwards,
                 drop_prob=0.2,
                 GPUs=1):
        """Conv front-end GRUNet variant: one Conv2d feeding two Deepnet stacks.

        Args:
            input_dim: number of features per timestep.
            hidden_dim: width of every hidden layer inside the Deepnets.
            output_dim: size of the final linear projection.
            n_layers: layer count, recorded on the instance.
            backwards: lookback window length; must be a multiple of 4.
            drop_prob: dropout probability for ``self.drop``.
            GPUs: device count, recorded on the instance.
        """
        super(GRUNet, self).__init__()
        assert backwards % 4 == 0  # window must split evenly into quarters

        # Keep the hyper-parameters on the instance for later use.
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        self.conv1 = nn.Conv2d(1, 10, (3, 3), stride=1, padding=1)

        # Flattened window width shared by both Deepnets and the output layer.
        flat_width = input_dim * backwards

        # conv1 expands to 10 channels, so its Deepnet sees 10x the width.
        self.c1deepnet = Deepnet(flat_width * 10,
                                 [[hidden_dim] * 3 for _ in range(4)],
                                 flat_width)

        self.deepnet = Deepnet(flat_width,
                               [[hidden_dim] * 3 for _ in range(4)],
                               flat_width)

        self.out_lin = nn.Linear(flat_width, output_dim)
Example no. 2
0
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 batch_size,
                 n_layers,
                 backwards,
                 drop_prob=0.10,
                 GPUs=1):
        """Attention + GRU variant of GRUNet.

        Stacks six MultiheadAttention/LayerNorm pairs on top of a linear
        input projection, followed by a GRU, a Deepnet refinement stage and
        three fully connected output layers.

        Args:
            input_dim: number of features per timestep.
            hidden_dim: hidden width of the attention stack, GRU and Deepnet.
            output_dim: size of the final linear projection.
            batch_size: batch size, recorded on the instance.
            n_layers: number of GRU layers.
            backwards: lookback window length (scales ``fc1``'s input width).
            drop_prob: dropout probability.
            GPUs: device count, recorded on the instance.
        """
        super(GRUNet, self).__init__()
        # Store each hyper-parameter exactly once (the original assigned
        # hidden_dim, n_layers and GPUs twice with identical values).
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers
        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob
        self.attn_layers = 6

        self.drop = nn.Dropout(p=drop_prob)

        self.lin_in = nn.Linear(input_dim, hidden_dim)

        # Six attention layers, each paired with its own LayerNorm.
        # NOTE(review): num_heads = hidden_dim // 4 (head size 4); hidden_dim
        # must be divisible by that head count or construction raises.
        self.attn_list = nn.ModuleList()
        self.layernorm = nn.ModuleList()
        for _ in range(self.attn_layers):
            self.attn_list.append(
                torch.nn.MultiheadAttention(hidden_dim, int(hidden_dim / 4)))
            self.layernorm.append(nn.LayerNorm(hidden_dim))

        # ``int(input_dim / 1)`` in the original was a no-op; pass input_dim
        # directly (dimensions are integers).
        self.gru1 = GRU(input_dim,
                        hidden_dim,
                        n_layers,
                        drop_prob,
                        batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim] * 3 for _ in range(4)],
                               self.hidden_dim,
                               nested=0,
                               droprate=drop_prob)
        # fc1 flattens the per-timestep GRU outputs across the whole window.
        self.fc1 = nn.Linear(backwards * hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()
Example no. 3
0
    def __init__(self, input_dim, hidden_dim, output_dim, batch_size, n_layers, backwards, drop_prob=0.10, GPUs=1):
        """Multi-head-attention + GRU variant of GRUNet.

        Three MultiheadAttention layers (one over raw inputs, two over hidden
        states), a GRU, a Deepnet refinement stage and three fully connected
        output layers.

        Args:
            input_dim: number of features per timestep.
            hidden_dim: hidden width of the GRU, attention and Deepnet stages.
            output_dim: size of the final linear projection.
            batch_size: batch size, recorded on the instance.
            n_layers: number of GRU layers.
            backwards: lookback window length (scales ``fc1``'s input width).
            drop_prob: dropout probability.
            GPUs: device count, recorded on the instance.
        """
        super(GRUNet, self).__init__()
        # Store each hyper-parameter exactly once (the original re-assigned
        # hidden_dim, n_layers and GPUs a second time with the same values).
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.batch_size = batch_size
        self.n_layers = n_layers
        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        # (Removed a block of commented-out Conv1d/Conv2d/MaxPool2d layers that
        # was carried as a dead triple-quoted string.)

        # mha1: num_heads == input_dim, i.e. embed size 1 per head;
        # mha2/mha3: hidden_dim // 4 heads over the hidden representation.
        self.mha1 = torch.nn.MultiheadAttention(input_dim, input_dim)
        self.mha2 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4)
        self.mha3 = torch.nn.MultiheadAttention(hidden_dim, hidden_dim // 4)

        # ``int(input_dim/1)`` in the original was a no-op.
        self.gru1 = GRU(input_dim, hidden_dim, n_layers, drop_prob, batch_first=True)

        self.r1_dnet = Deepnet(self.hidden_dim,
                               [[hidden_dim] * 3 for _ in range(4)],
                               self.hidden_dim, nested=0, droprate=drop_prob)
        # fc1 flattens the raw input window (backwards timesteps x input_dim).
        self.fc1 = nn.Linear(backwards * input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.LeakyReLU()
Example no. 4
0
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 n_layers,
                 backwards,
                 drop_prob=0.2,
                 GPUs=1):
        """Three-branch convolutional GRUNet variant.

        Builds three successive conv pipelines (each pooling the window
        further down), feeds each flattened branch into its own two-row
        Deepnet, and concatenates the three Deepnet outputs into the final
        linear layer.

        Args:
            input_dim: number of features per timestep.
            hidden_dim: hidden width used throughout the Deepnets.
            output_dim: size of the final linear projection.
            n_layers: layer count, recorded on the instance.
            backwards: lookback window length; must be a multiple of 8.
            drop_prob: dropout probability for ``self.drop``.
            GPUs: device count, recorded on the instance.
        """
        super(GRUNet, self).__init__()
        assert backwards % 8 == 0  # the window is halved by three pooling stages

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.backwards = backwards
        self.GPUs = GPUs
        self.drop_prob = drop_prob

        self.drop = nn.Dropout(p=drop_prob)
        self.inp_width = int(self.backwards * self.input_dim)

        # --- Branch 1: expand 1 -> 80 channels, pool time axis, squeeze to 10.
        self.conv1a = torch.nn.Conv2d(1, 20, (5, 1), stride=(1, 1), padding=(2, 0))
        self.conv1b = torch.nn.Conv2d(20, 40, (5, 3), stride=(1, 1), padding=(2, 1))
        self.conv1c = torch.nn.Conv2d(40, 80, (5, 3), stride=(1, 1), padding=(2, 1))
        self.pool1 = torch.nn.MaxPool2d((7, 1), stride=(2, 1), padding=(3, 0))
        self.redu1 = torch.nn.Conv2d(80, 10, (1, 1), stride=(1, 1), padding=(0, 0))
        # Flattened size after pool1 halves the time axis.
        self.conv_r1 = int(10 * self.backwards / 2 * self.input_dim)

        # --- Branch 2: expand 10 -> 120 channels (conv2c strides 3 over
        # features), pool again, squeeze to 20.
        self.conv2a = torch.nn.Conv2d(10, 40, (5, 3), stride=(1, 1), padding=(2, 1))
        self.conv2b = torch.nn.Conv2d(40, 80, (5, 3), stride=(1, 1), padding=(2, 1))
        self.conv2c = torch.nn.Conv2d(80, 120, (5, 3), stride=(1, 3), padding=(2, 0))
        self.pool2 = torch.nn.MaxPool2d((7, 1), stride=(2, 1), padding=(3, 0))
        self.redu2 = torch.nn.Conv2d(120, 20, (1, 1), stride=(1, 1), padding=(0, 0))
        # Time axis now at backwards/4, feature axis at input_dim/3.
        self.conv_r2 = int(20 * self.backwards / 4 * self.input_dim / 3)

        # --- Branch 3: one wide conv, pool, then squeeze in two steps.
        self.conv3 = torch.nn.Conv2d(20, 80, (7, 3), stride=(1, 1), padding=(3, 1))
        self.pool3 = torch.nn.MaxPool2d((3, 3), stride=(2, 1), padding=(1, 1))
        self.redu3a = torch.nn.Conv2d(80, 30, (1, 1), stride=(1, 1), padding=(0, 0))
        self.redu3b = torch.nn.Conv2d(30, 10, (1, 1), stride=(1, 1), padding=(0, 0))
        # Time axis now at backwards/8.
        self.conv_r3 = int(10 * self.backwards / 8 * self.input_dim / 3)

        # Each branch output feeds a two-row Deepnet of hidden_dim-wide layers.
        self.r1_dnet = Deepnet(self.conv_r1,
                               [[hidden_dim] * 3 for _ in range(2)],
                               hidden_dim)

        self.r2_dnet = Deepnet(self.conv_r2,
                               [[hidden_dim] * 3 for _ in range(2)],
                               hidden_dim)

        self.r3_dnet = Deepnet(self.conv_r3,
                               [[hidden_dim] * 3 for _ in range(2)],
                               hidden_dim)

        # Concatenation of the three Deepnet heads feeds the output layer.
        self.out_lin = nn.Linear(hidden_dim * 3, output_dim)