Example #1
    def __init__(self, n_token, config):
        embed_size = config['embedding_size']
        hidden_size = config['hidden_size']
        n_layers = config['n_layers']
        dropout = config['dropout']
        bidirectional = config['bidirectional']
        ffnn_layers = config['linear']
        sentence_length = config["sentence_length"]

        super(RNNModel, self).__init__()

        self.embed = nn.Embedding(n_token, embed_size)
        self.word_dropout = nn.Dropout(config.get("word_dropout", 0))

        self.RNN = nn.LSTM(embed_size,
                           hidden_size,
                           n_layers,
                           batch_first=True,
                           dropout=dropout,
                           bidirectional=bidirectional)
        self.sent_dropout = nn.Dropout(config.get("sent_dropout", 0))

        # Compute the flattened output size of the RNN layer
        curr_dim = hidden_size * (bidirectional + 1) * sentence_length

        ffnn_layers = [curr_dim] + ffnn_layers
        # nn.ModuleList registers the linear layers as submodules
        self.layers = nn.ModuleList()
        for i, o in zip(ffnn_layers[:-1], ffnn_layers[1:]):
            self.layers.append(nn.Linear(i, o))
            self.layers.append(nn.ReLU())
            self.layers.append(nn.Dropout(config.get("dropout", 0)))

        self.softmax = nn.Softmax(dim=1)
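    # Hedged sketch (not in the source): a forward consistent with the layers
    # above, flattening all sentence_length steps before the feed-forward stack.
    def forward(self, x):  # x: (batch, sentence_length) token ids
        emb = self.word_dropout(self.embed(x))      # (batch, seq, embed_size)
        out, _ = self.RNN(emb)                      # (batch, seq, hidden * num_directions)
        out = self.sent_dropout(out.reshape(out.size(0), -1))  # flatten to curr_dim
        for layer in self.layers:                   # Linear -> ReLU -> Dropout stack
            out = layer(out)
        return self.softmax(out)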
Example #2
 def __init__(self, c_i, c_o, f, s=1, p=0):
     super(Conv_block, self).__init__()
     self.conv = nn.Conv2d(in_channels=c_i,
                           out_channels=c_o,
                           kernel_size=f,
                           stride=s,
                           padding=p,
                           bias=False)
     self.bn = nn.BatchNorm2d(num_features=c_o, eps=0.001)
     self.relu = nn.ReLU(inplace=True)
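A hypothetical usage of the block above, assuming the surrounding class is Conv_block(nn.Module); since no forward is shown, the conv → bn → relu chain is applied manually:

import torch

block = Conv_block(c_i=3, c_o=64, f=3, s=1, p=1)   # 3x3 kernel, stride 1, padding 1
x = torch.randn(8, 3, 32, 32)
y = block.relu(block.bn(block.conv(x)))            # shape: (8, 64, 32, 32)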
Example #3
 def __init__(self, num_classes=2):
     super(AlexNet, self).__init__()
     self.model_name = 'alexnet'
     self.feature=nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(64, 192, kernel_size=5, padding=2),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
         nn.Conv2d(192, 384, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(384, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.Conv2d(256, 256, kernel_size=3, padding=1),
         nn.ReLU(inplace=True),
         nn.MaxPool2d(kernel_size=3, stride=2),
     )
     self.classifier = nn.Sequential(
         nn.Dropout(),
         nn.Linear(256*6*6,4096),
         nn.ReLU(inplace=True),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(inplace=True),
         #nn.Dropout(),
         nn.Linear(4096, num_classes),
         #nn.RELU()
     )
 def forward(self, x):
     x = self.feature(x)
     # Reshape between the feature extractor and the classifier:
     # flatten the multi-channel feature maps into a single vector
     x = x.view(x.size(0), 256 * 6 * 6)
     x = self.classifier(x)
     return x
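A quick shape check for the example above (a sketch, assuming the __init__ and forward sit in a class AlexNet(nn.Module)): with a 224x224 input, the feature stack ends at 256 channels on a 6x6 grid, which matches the 256*6*6 flatten in forward:

import torch

net = AlexNet()                            # num_classes defaults to 2
out = net(torch.randn(1, 3, 224, 224))     # features before flatten: (1, 256, 6, 6)
print(out.shape)                           # torch.Size([1, 2])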
Example #4
 def __init__(self, config):
     super(TextCnn, self).__init__()
     self.config = config
     self.embedding = nn.Embedding(config.VOCAB_SIZE, config.EMBEDDING_DIM)
     convs = [
         nn.Sequential(
             nn.Conv1d(in_channels=config.EMBEDDING_DIM,
                       out_channels=config.HIDDING_DIM,
                       kernel_size=kernel_size),
             nn.BatchNorm1d(config.HIDDING_DIM), nn.ReLU(inplace=True))
         for kernel_size in config.KERNEL_SIZE
     ]
     # nn.ModuleList registers the pooling layers as submodules
     self.Maxpool = nn.ModuleList([
         nn.MaxPool1d(kernel_size=(config.SENTENCE_LEN - kernel_size + 1))
         for kernel_size in config.KERNEL_SIZE
     ])
     self.sentence_convs = nn.ModuleList(convs)
     self.fc = nn.Sequential(
         nn.Linear(config.HIDDING_DIM * len(config.KERNEL_SIZE),
                   config.FC_HID_DIM), nn.BatchNorm1d(config.FC_HID_DIM),
         nn.ReLU(inplace=True),
         nn.Linear(config.FC_HID_DIM, config.LABEL_NUM_1))
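 # Hedged sketch (not in the source): a forward consistent with the layers above;
 # assumes `import torch` at module level.
 def forward(self, x):                           # x: (batch, SENTENCE_LEN) token ids
     emb = self.embedding(x).transpose(1, 2)     # channels-first for Conv1d
     feats = [pool(conv(emb)).squeeze(-1)        # each: (batch, HIDDING_DIM)
              for conv, pool in zip(self.sentence_convs, self.Maxpool)]
     return self.fc(torch.cat(feats, dim=1))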
Example #5
 def __init__(self, mem_size):
     super(CNNLSTMModel, self).__init__()
     self.mem_size = mem_size
     self.FF = nn.Linear(
         1764, 294)  # feed-forward layer for reduction of HOG features
     # Pretrained ResNet-18 on ImageNet
     self.net = models.resnet18(pretrained=True)
     #self.alexnet = models.alexnet(pretrained=True)
     self.convNet = nn.Sequential(*list(self.net.children())[:-2])
     #self.convNet = nn.Sequential(*list(self.alexnet.features.children()))
     self.resNet = None
     #self.alexnet = None
     self.convLSTM = ConvLSTM(518, self.mem_size)
     #self.convLSTM = ConvLSTM(256, self.mem_size)
     self.RELU = nn.ReLU()
     self.maxpool = nn.MaxPool2d(2)
     self.FF1 = nn.Linear(3 * 3 * self.mem_size, 1000)
     self.batchNorm = nn.BatchNorm1d(1000)
     self.FF2 = nn.Linear(1000, 256)
     self.FF3 = nn.Linear(256, 10)
     self.FF4 = nn.Linear(10, 2)
     self.classifier = nn.Sequential(self.FF1, self.batchNorm, self.RELU,
                                     self.FF2, self.RELU, self.FF3,
                                     self.RELU, self.FF4)
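The forward pass is not shown; the 3 * 3 * self.mem_size input to FF1 implies the ConvLSTM state is pooled down to a 3x3 grid before classification. A hedged check of the classifier head alone (assumes the ConvLSTM class referenced above is importable so the model can be constructed):

import torch

model = CNNLSTMModel(mem_size=256)
state = torch.randn(4, 256, 3, 3)            # dummy (batch, mem_size, 3, 3) ConvLSTM state
logits = model.classifier(state.flatten(1))  # (4, 2): two-way classification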
Example #6
 def __init__(self, in_feature, out_feature):
     super(BP_1, self).__init__()
     self.nn = nn.Sequential(nn.Linear(in_feature, 64), nn.ReLU(),
                             nn.Linear(64, 32), nn.ReLU(),
                             nn.Linear(32, out_feature))
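A hypothetical usage of the example above, assuming the enclosing class is BP_1(nn.Module); the sequential stack is called directly since no forward is shown:

import torch

net = BP_1(in_feature=10, out_feature=1)
y = net.nn(torch.randn(32, 10))   # shape: (32, 1)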
Example #7
    def __init__(self):
        super(VDSR_Naivem, self).__init__()
        self.conv_first = nn.Conv2d(1, 64, 3, padding=1, bias=False)

        self.relu = nn.ReLU(inplace=True)
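        # Hedged continuation sketch (not in the source): VDSR-style models follow
        # the first conv with a stack of 3x3 convs and learn a global residual;
        # the body depth of 18 layers here is an assumption.
        self.body = nn.Sequential(*[
            layer
            for _ in range(18)
            for layer in (nn.Conv2d(64, 64, 3, padding=1, bias=False),
                          nn.ReLU(inplace=True))
        ])
        self.conv_last = nn.Conv2d(64, 1, 3, padding=1, bias=False)
        # forward (sketch): return self.conv_last(self.body(self.relu(self.conv_first(x)))) + x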