def __init__(self):
    super().__init__()
    self.CNNSequential = SeqModule([
        # convolution 1 + ReLU
        nn.Conv2d(1, 4, 3),
        nn.ReLU(),
        # convolution 2 + ReLU + pooling
        nn.Conv2d(4, 4, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # convolution 3 + ReLU
        nn.Conv2d(4, 8, 3),
        nn.ReLU(),
        # convolution 4 + ReLU + pooling
        nn.Conv2d(8, 8, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ])
    # up to layer 5
    self.CNN2Sequential = SeqModule([
        nn.Conv2d(8, 16, 3),
        nn.ReLU(),
    ])
    ############
    # Concatenate
    ############
    self.afterCNN = SeqModule([
        nn.MaxPool2d(2, padding=1),
        # same as reshape
        nn.Flatten(),
    ])
    self.lastLayer = SeqModule([
        nn.Linear(384, 128),
        nn.ReLU(),
        nn.Linear(128, 10),
    ])
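Because the forward wiring around the Concatenate marker is not shown, the 384 in_features of lastLayer cannot be derived from the layers alone. A quick shape probe is a safe way to check; this is a sketch, assuming SeqModule applies its layers in order like torch.nn.Sequential, with a hypothetical class name (TwoBranchCNN) and an assumed 28×28 grayscale input:

import torch

net = TwoBranchCNN()           # hypothetical name for the class defined above
x = torch.randn(1, 1, 28, 28)  # assumed input size; replace with the real one
feat = net.CNNSequential(x)
print(feat.shape)              # (1, 8, 4, 4) for a 28x28 input
feat2 = net.CNN2Sequential(feat)
print(feat2.shape)             # (1, 16, 2, 2)
flat = net.afterCNN(feat2)
print(flat.shape)              # (1, 64) here; whatever is concatenated before
                               # lastLayer must total 384 features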
def __init__(self):
    super().__init__()
    self.sequential = [
        nn.Linear(784, 300),
        nn.LeakyReLU(),
        nn.Linear(300, 300),
        nn.LeakyReLU(),
        nn.Linear(300, 10),
    ]
    self.activation = []
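Since the layers live in a plain Python list (with self.activation presumably used to cache intermediate values), the forward pass would just chain them. A minimal sketch, assuming 28×28 images flattened to the 784 in_features of the first linear layer:

def forward(self, x):
    # Flatten (N, 1, 28, 28) images to (N, 784) for the first linear layer.
    out = x.reshape(x.shape[0], -1)
    for layer in self.sequential:
        out = layer(out)  # Linear(784,300) -> LeakyReLU -> ... -> Linear(300,10)
    return out            # raw scores for 10 classes

One caveat if this runs on stock PyTorch: modules held in a plain list are invisible to model.parameters(); nn.ModuleList fixes that. If nn here is a hand-rolled framework, the list form may be deliberate.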
def __init__(self, embed, dropout=False):
    super().__init__()
    self.CNNSequential = SeqModule([
        # convolution 1 + ReLU
        nn.Conv2d(3, 16, 3),
        nn.ReLU(),
        # convolution 2 + ReLU + pooling
        nn.Conv2d(16, 16, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # convolution 3 + ReLU + pooling
        nn.Conv2d(16, 32, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        # convolution 4 + ReLU + pooling
        nn.Conv2d(32, 32, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
    ])
    # up to layer 5
    self.CNN2Sequential = SeqModule([
        nn.Conv2d(32, 64, 3),
        nn.ReLU(),
    ])
    ############
    # Concatenate
    ############
    self.afterCNN = SeqModule([
        nn.MaxPool2d(2),
        # same as reshape
        nn.Flatten(),
        nn.Dropout(check=dropout),
        nn.Linear(1536, 256),
        nn.Tanh(),
    ])
    self.Dropout = nn.Dropout(check=dropout)
    self.RNN = SeqModule([
        nn.RNN(embed, 256),
        nn.Tanh(),
    ])
    ############
    # ADD in the middle
    ############
    self.lastLayer = SeqModule([
        nn.Linear(256, 256),
        nn.ReLU(),
        nn.Linear(256, 1665),
    ])
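The Concatenate and "ADD in the middle" markers suggest the image features and the recurrent text features are fused element-wise before the classifier. A sketch of one plausible forward pass under that reading; the argument names, the placement of self.Dropout, and the exact fusion point are all assumptions, and the addition further assumes the per-step RNN states broadcast against the single image vector:

def forward(self, images, embedded_tokens):
    feat = self.CNNSequential(images)        # 3 -> 16 -> 16 -> 32 -> 32 channels
    feat = self.CNN2Sequential(feat)         # fifth block: 32 -> 64 channels
    img_vec = self.afterCNN(feat)            # pool, flatten, project to 256 dims
    txt_vec = self.RNN(self.Dropout(embedded_tokens))  # per-step 256-dim states
    fused = img_vec + txt_vec                # "ADD in the middle": element-wise
    return self.lastLayer(fused)             # scores over a 1665-word vocabulary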
def __init__(self, embed):
    super().__init__()
    self.RNN = SeqModule([nn.LSTM(65, 256)])
    self.linear = SeqModule([
        nn.Linear(256, 65),
    ])
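The 65-dimensional input and output suggest a character-level language model over a 65-symbol vocabulary. A minimal sketch of the corresponding forward pass; note that in stock PyTorch, nn.LSTM returns an (output, state) tuple and cannot sit inside nn.Sequential directly, so this assumes SeqModule unpacks it:

def forward(self, x):
    # x: (seq_len, batch, 65) one-hot characters -- assumed encoding.
    out = self.RNN(x)        # LSTM hidden state at every step, 256 features
    return self.linear(out)  # per-step scores over the 65 symbols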
def __init__(self):
    super().__init__()
    self.sequential = [
        nn.Conv2d(1, 16, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Conv2d(16, 32, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Conv2d(32, 64, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Conv2d(64, 128, 3),
        nn.ReLU(),
        nn.MaxPool2d(2),
        nn.Flatten(),
        nn.Linear(1 * 1 * 128, 10),
    ]
    self.activation = []
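The final Linear expects 1 * 1 * 128 features, which pins the feature map to 1×1 after the fourth pool. A 28×28 input is actually too small (it shrinks to 1×1 before the fourth conv), while, for example, a 48×48 input works out exactly (48 -> 46 -> 23 -> 21 -> 10 -> 8 -> 4 -> 2 -> 1). A minimal forward sketch chaining the list, under that assumption:

def forward(self, x):
    # x: (N, 1, 48, 48) -- assumed; each conv(3) trims 2 px, each pool(2) halves
    out = x
    for layer in self.sequential:
        out = layer(out)  # ends at (N, 128, 1, 1), flattened, then Linear -> 10
    return out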