def __init__(self, n_classes):
    """Build EnvNet3: a raw-waveform conv frontend feeding a DenseNet backbone
    and a small fully-connected classifier head.

    Args:
        n_classes: number of output classes for the final linear layer.
    """
    super(EnvNet3, self).__init__()

    # Layer list assembled first, then wrapped; the string keys become the
    # submodule names (and therefore the state_dict keys) — do not rename.
    layers = [
        ('conv1', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(1, 64), stride=(1, 2), padding=0)),   # [b, 32, 1, 33294]
        ('conv2', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 16), stride=(1, 2), padding=0)),   # [b, 64, 1, 16640]
        ('max_pool2', nn.MaxPool2d(kernel_size=(1, 64), stride=(1, 64),
                                   ceil_mode=True)),                         # [b, 64, 1, 260]
        ('transpose', Transpose()),                                          # [b, 1, 64, 260]
        # growth_rate lowered 32 -> 16 to fit within limited server memory.
        ('densenet', DenseNet(growth_rate=16, block_config=(6, 12, 24, 16))),
        ('flatten', Flatten()),
        ('fc11', nn.Linear(in_features=516 * 8 * 32, out_features=1024, bias=True)),
        ('relu11', nn.ReLU()),
        ('dropout11', nn.Dropout()),
        ('fc13', nn.Linear(in_features=1024, out_features=n_classes, bias=True)),  # [b, n]
    ]
    self.model = nn.Sequential(OrderedDict(layers))

    # Parameter initialization: Kaiming for convs, constant for batch-norm,
    # small-variance Gaussian for linear layers.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.01)
            nn.init.constant_(module.bias, 0)
def __init__(self, n_classes):
    """Build EnvNet4: raw-waveform conv frontend, DenseNet backbone,
    two SE bottleneck blocks, global average pooling, and a linear head.

    Args:
        n_classes: number of output classes for the final linear layer.
    """
    super(EnvNet4, self).__init__()

    # Keys become submodule / state_dict names — keep them byte-identical.
    # NOTE(review): the key 'global avgpool' contains a space, which blocks
    # attribute-style access (model.global avgpool); renaming it would break
    # existing checkpoints, so it is left as-is.
    stages = [
        ('conv1', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(1, 64), stride=(1, 2), padding=0)),   # [b, 32, 1, 33294]
        ('conv2', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 16), stride=(1, 2), padding=0)),   # [b, 64, 1, 16640]
        ('max_pool2', nn.MaxPool2d(kernel_size=(1, 64), stride=(1, 64),
                                   ceil_mode=True)),                         # [b, 64, 1, 260]
        ('transpose', Transpose()),                                          # [b, 1, 64, 260]
        # growth_rate lowered 32 -> 16 to fit within limited server memory.
        ('densenet', DenseNet(growth_rate=16, block_config=(6, 12, 24, 16))),  # [b, 1024, 8, 32]
        ('senet1', SEBottleneck(inplanes=136, planes=136 // 4)),
        ('senet2', SEBottleneck(inplanes=260, planes=260 // 4)),
        # Alternative (disabled) squeeze-excite variant:
        # ('senet1', SELayer(channel=256)),
        # ('senet2', SELayer(channel=512)),
        ('global avgpool', nn.AdaptiveAvgPool2d(1)),
        ('flatten', Flatten()),
        ('fc13', nn.Linear(in_features=(136 + 260 + 516),
                           out_features=n_classes, bias=True)),              # [b, n]
    ]
    self.model = nn.Sequential(OrderedDict(stages))

    # Parameter initialization: Kaiming for convs, constants for batch-norm,
    # small-variance Gaussian for linear layers (bias checked for None here).
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.01)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
def __init__(self, n_classes):
    """Build the original EnvNet: two 1-D-style conv stages on the raw
    waveform, a transpose to a 2-D feature map, two more conv/pool stages,
    then a three-layer fully-connected classifier.

    Args:
        n_classes: number of output classes for the final linear layer.
    """
    super(EnvNet, self).__init__()

    # Keys become submodule / state_dict names — keep them unchanged.
    pipeline = OrderedDict()
    pipeline['conv1'] = ConvBNReLu(1, 40, (1, 8))
    pipeline['conv2'] = ConvBNReLu(40, 40, (1, 8))
    pipeline['max_pool2'] = nn.MaxPool2d((1, 160), ceil_mode=True)
    pipeline['transpose'] = Transpose()
    pipeline['conv3'] = ConvBNReLu(1, 50, (8, 13))
    pipeline['max_pool3'] = nn.MaxPool2d((3, 3), ceil_mode=True)
    pipeline['conv4'] = ConvBNReLu(50, 50, (1, 5))
    pipeline['max_pool4'] = nn.MaxPool2d((1, 3), ceil_mode=True)
    pipeline['flatten'] = Flatten()
    # Classifier head; 50 * 11 * 14 is the flattened feature-map size.
    pipeline['fc5'] = nn.Linear(in_features=50 * 11 * 14, out_features=4096, bias=True)
    pipeline['relu5'] = nn.ReLU()
    pipeline['dropout5'] = nn.Dropout()
    pipeline['fc6'] = nn.Linear(4096, 4096)
    pipeline['relu6'] = nn.ReLU()
    pipeline['dropout6'] = nn.Dropout()
    pipeline['fc7'] = nn.Linear(4096, n_classes)

    self.model = nn.Sequential(pipeline)
def __init__(self, n_classes):
    """Build EnvNet2: a deep raw-waveform CNN — two temporal conv stages,
    a transpose to 2-D, four conv/pool stages with widening channels, and a
    three-layer fully-connected classifier head.

    Args:
        n_classes: number of output classes for the final linear layer.
    """
    super(EnvNet2, self).__init__()

    # Keys become submodule / state_dict names — keep them unchanged.
    stages = [
        ('conv1', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(1, 64), stride=(1, 2), padding=0)),
        ('conv2', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 16), stride=(1, 2), padding=0)),
        ('max_pool2', nn.MaxPool2d(kernel_size=(1, 64), stride=(1, 64), ceil_mode=True)),
        ('transpose', Transpose()),
        ('conv3', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(8, 8), stride=(1, 1), padding=0)),
        ('conv4', EnvReLu(in_channels=32, out_channels=32,
                          kernel_size=(8, 8), stride=(1, 1), padding=0)),
        ('max_pool4', nn.MaxPool2d(kernel_size=(5, 3), stride=(5, 3), ceil_mode=True)),
        ('conv5', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 4), stride=(1, 1), padding=0)),
        ('conv6', EnvReLu(in_channels=64, out_channels=64,
                          kernel_size=(1, 4), stride=(1, 1), padding=0)),
        ('max_pool6', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('conv7', EnvReLu(in_channels=64, out_channels=128,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('conv8', EnvReLu(in_channels=128, out_channels=128,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('max_pool8', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('conv9', EnvReLu(in_channels=128, out_channels=256,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('conv10', EnvReLu(in_channels=256, out_channels=256,
                           kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('max_pool10', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('flatten', Flatten()),
        # 256 * 10 * 8 is the flattened feature-map size at this point.
        ('fc11', nn.Linear(in_features=256 * 10 * 8, out_features=4096, bias=True)),
        ('relu11', nn.ReLU()),
        ('dropout11', nn.Dropout()),
        ('fc12', nn.Linear(in_features=4096, out_features=4096, bias=True)),
        ('relu12', nn.ReLU()),
        ('dropout12', nn.Dropout()),
        ('fc13', nn.Linear(in_features=4096, out_features=n_classes, bias=True)),
    ]
    self.model = nn.Sequential(OrderedDict(stages))

    # Parameter initialization: Kaiming for convs, constants for batch-norm,
    # small-variance Gaussian for linear layers.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.BatchNorm2d):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, 0, 0.01)
            nn.init.constant_(module.bias, 0)
def __init__(self, n_classes):
    """Build EnvNet2 (softmax variant): same conv/pool trunk and FC head as
    the plain EnvNet2, but with a final Softmax so the model emits class
    probabilities directly. No custom weight initialization is applied.

    NOTE(review): if this model is trained with nn.CrossEntropyLoss, the
    softmax would be applied twice (CrossEntropyLoss expects raw logits) —
    confirm the training loss before using this variant.

    Args:
        n_classes: number of output classes for the final linear layer.
    """
    super(EnvNet2, self).__init__()

    # Keys become submodule / state_dict names — keep them unchanged.
    stages = [
        ('conv1', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(1, 64), stride=(1, 2), padding=0)),
        ('conv2', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 16), stride=(1, 2), padding=0)),
        ('max_pool2', nn.MaxPool2d(kernel_size=(1, 64), stride=(1, 64), ceil_mode=True)),
        ('transpose', Transpose()),
        ('conv3', EnvReLu(in_channels=1, out_channels=32,
                          kernel_size=(8, 8), stride=(1, 1), padding=0)),
        ('conv4', EnvReLu(in_channels=32, out_channels=32,
                          kernel_size=(8, 8), stride=(1, 1), padding=0)),
        ('max_pool4', nn.MaxPool2d(kernel_size=(5, 3), stride=(5, 3), ceil_mode=True)),
        ('conv5', EnvReLu(in_channels=32, out_channels=64,
                          kernel_size=(1, 4), stride=(1, 1), padding=0)),
        ('conv6', EnvReLu(in_channels=64, out_channels=64,
                          kernel_size=(1, 4), stride=(1, 1), padding=0)),
        ('max_pool6', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('conv7', EnvReLu(in_channels=64, out_channels=128,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('conv8', EnvReLu(in_channels=128, out_channels=128,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('max_pool8', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('conv9', EnvReLu(in_channels=128, out_channels=256,
                          kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('conv10', EnvReLu(in_channels=256, out_channels=256,
                           kernel_size=(1, 2), stride=(1, 1), padding=0)),
        ('max_pool10', nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2), ceil_mode=True)),
        ('flatten', Flatten()),
        # 256 * 10 * 8 is the flattened feature-map size at this point.
        ('fc11', nn.Linear(in_features=256 * 10 * 8, out_features=4096, bias=True)),
        ('relu11', nn.ReLU()),
        ('dropout11', nn.Dropout()),
        ('fc12', nn.Linear(in_features=4096, out_features=4096, bias=True)),
        ('relu12', nn.ReLU()),
        ('dropout12', nn.Dropout()),
        ('fc13', nn.Linear(in_features=4096, out_features=n_classes, bias=True)),
        ('softmax', nn.Softmax(dim=-1)),
    ]
    self.model = nn.Sequential(OrderedDict(stages))