def __init__(self, cfg, backbone=resnet50_atrous):
    super(DeeplabV3Plus, self).__init__()
    self.backbone = backbone(pretrained=False, os=cfg.OUTPUT_STRIDE)
    input_channel = 512 * self.backbone.block.expansion
    self.aspp = ASPP(in_chans=input_channel, out_chans=cfg.ASPP_OUTDIM, rate=16 // cfg.OUTPUT_STRIDE)
    self.dropout1 = nn.Dropout(0.5)
    self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.OUTPUT_STRIDE // 4)

    # project low-level backbone features before concatenation with the ASPP output
    indim = 64 * self.backbone.block.expansion
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(indim, cfg.SHORTCUT_DIM, cfg.SHORTCUT_KERNEL, 1,
                  padding=cfg.SHORTCUT_KERNEL // 2, bias=False),
        nn.BatchNorm2d(cfg.SHORTCUT_DIM),
        nn.ReLU(inplace=True),
    )
    self.cat_conv = nn.Sequential(
        nn.Conv2d(cfg.ASPP_OUTDIM + cfg.SHORTCUT_DIM, cfg.ASPP_OUTDIM, 3, 1, padding=1, bias=False),
        nn.BatchNorm2d(cfg.ASPP_OUTDIM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.ASPP_OUTDIM, cfg.ASPP_OUTDIM, 3, 1, padding=1, bias=False),
        nn.BatchNorm2d(cfg.ASPP_OUTDIM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    )
    self.cls_conv = nn.Conv2d(cfg.ASPP_OUTDIM, cfg.NUM_CLASSES, 1, 1, padding=0)

    # Kaiming init for convolutions, constant init for BatchNorm
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
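# A minimal forward() sketch (not from the original source) showing how the modules defined
# above might compose. It assumes `import torch` and that self.backbone(x) returns a list of
# feature maps ordered shallow-to-deep, with layers[0] being the low-level feature consumed by
# shortcut_conv; both of these are assumptions made for illustration only.
def forward(self, x):
    layers = self.backbone(x)                        # assumed: [low-level, ..., deepest] feature maps
    feature_aspp = self.aspp(layers[-1])             # multi-rate context on the deepest features
    feature_aspp = self.dropout1(feature_aspp)
    feature_aspp = self.upsample_sub(feature_aspp)   # bring ASPP output to the low-level feature scale

    feature_shallow = self.shortcut_conv(layers[0])  # project low-level features to SHORTCUT_DIM
    feature_cat = torch.cat([feature_aspp, feature_shallow], dim=1)
    result = self.cat_conv(feature_cat)              # fuse and refine
    result = self.cls_conv(result)                   # per-pixel class logits
    return self.upsample4(result)                    # back to input resolution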
def __init__(self, model_name, project_dir):
    super(DeepLabV3, self).__init__()

    self.num_classes = 20
    self.model_name = model_name
    self.project_dir = project_dir
    self.create_model_dirs()

    # specify the type of ResNet (or any other feature-extraction model) here
    self.resnet = ResNet18(project_dir)
    self.aspp = ASPP(num_classes=self.num_classes)
def __init__(self):
    super(DeepLabV3, self).__init__()

    self.num_classes = 4

    self.resnet = ResNet50_OS16()  # NOTE! specify the type of ResNet here
    # NOTE! for ResNet50-152 backbones, ASPP_Bottleneck must be used instead of ASPP
    self.aspp = ASPP_Bottleneck(num_classes=self.num_classes)
def __init__(self, sess, config, is_training=True):
    self.resnet_model = resnet.__dict__.get(config.resnet_model)(
        sess, pretrained=True, is_training=is_training)
    self.aspp = ASPP(is_training=is_training)
    self.conv = ConvBlock(config.num_classes, 1, 1, is_training=is_training, name='conv1')
def __init__(self, model_id, project_dir):
    super(DeepLabV3, self).__init__()

    self.num_classes = 5
    self.model_id = model_id
    self.project_dir = project_dir

    self.resnet = ResNet18_OS8()  # NOTE! specify the type of ResNet here
    # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
    self.aspp = ASPP(num_classes=self.num_classes)
def __init__(self, model_id, project_dir):
    super(DeepLabV3, self).__init__()  # call the parent class's constructor

    self.num_classes = 20  # Cityscapes: 19 evaluated classes + 1 background/void class
    self.model_id = model_id  # e.g. "eval_seq"
    self.project_dir = project_dir  # e.g. "/root/deeplabv3"
    self.create_model_dirs()

    self.resnet = ResNet18_OS8()  # NOTE! specify the type of ResNet here
    # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
    self.aspp = ASPP(num_classes=self.num_classes)
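# A hedged sketch of the matching forward() pass (not shown above): the OS8 ResNet downsamples
# by 8, ASPP produces per-class logits, and the result is bilinearly upsampled back to the input
# size. Assumes `import torch.nn.functional as F`; written for illustration, not taken from the repo.
def forward(self, x):
    h, w = x.size(2), x.size(3)
    feature_map = self.resnet(x)      # (batch_size, 512, h/8, w/8) for ResNet18_OS8
    output = self.aspp(feature_map)   # (batch_size, num_classes, h/8, w/8)
    output = F.interpolate(output, size=(h, w), mode='bilinear', align_corners=False)
    return output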
def __init__(self, num_classes, backbone='50'):
    super(DeepLabV3Plus, self).__init__()

    if backbone == '50':
        self.backbone = resnet50()
    elif backbone == '101':
        self.backbone = resnet101()
    elif backbone == '152':
        self.backbone = resnet152()
    else:
        raise ValueError('backbone {} not supported.'.format(backbone))

    self.num_classes = num_classes
    self.aspp = ASPP()
    self.decoder = Decoder(self.num_classes)
def __init__(self):
    super(DeepLabV3, self).__init__()

    self.num_classes = 6
    # self.model_id = model_id
    # self.project_dir = project_dir
    # self.create_model_dirs()

    self.resnet = resnet18(pretrained=True)  # NOTE! specify the type of ResNet here
    # NOTE! if you use ResNet50-152, set self.aspp = ASPP_Bottleneck(num_classes=self.num_classes) instead
    self.aspp = ASPP(num_classes=self.num_classes)
def __init__(self, block, layers, num_classes):
    print("model.py")
    self.inplanes = 128
    super(ResNet, self).__init__()

    # stem: three 3x3 convolutions in place of the usual single 7x7 convolution
    self.conv1 = conv3x3(3, 64, stride=2)
    self.bn1 = BatchNorm2d(64)
    self.relu1 = nn.ReLU(inplace=False)
    self.conv2 = conv3x3(64, 64)
    self.bn2 = BatchNorm2d(64)
    self.relu2 = nn.ReLU(inplace=False)
    self.conv3 = conv3x3(64, 128)
    self.bn3 = BatchNorm2d(128)
    self.relu3 = nn.ReLU(inplace=False)
    # NOTE! with ceil_mode=True, x of shape (batch_size, 128, h/4, w/4) comes out as
    # (batch_size, 128, 33, 33) instead of (batch_size, 128, 32, 32) when h == w == 256
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 1, 1))

    self.aspp = ASPP()
    self.cls = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
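# One plausible forward() for this dilated ResNet (assumed; only __init__ is shown above).
# layer3/layer4 use dilation instead of stride, so the feature map stays at 1/8 resolution;
# the ASPP module is assumed to reduce its input to the 512 channels expected by self.cls.
def forward(self, x):
    x = self.relu1(self.bn1(self.conv1(x)))
    x = self.relu2(self.bn2(self.conv2(x)))
    x = self.relu3(self.bn3(self.conv3(x)))
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)   # dilation=2, stride=1: spatial resolution preserved
    x = self.layer4(x)   # dilation=4 with multi_grid
    x = self.aspp(x)     # assumed to output 512 channels
    return self.cls(x)   # per-pixel class logits at 1/8 of the input resolution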
def __init__(self, backbone='resnet18', num_classes=16, pretrained=False):
    super().__init__()
    assert backbone in ['resnet18', 'resnet34', 'resnet50'], 'backbone not implemented'

    resnet = get_model(backbone, pretrained=pretrained)
    self.conv1 = resnet.conv1
    self.bn1 = resnet.bn1
    self.relu = resnet.relu
    self.maxpool = resnet.maxpool
    self.layer1 = resnet.layer1
    self.layer2 = resnet.layer2
    self.layer3 = resnet.layer3
    self.layer4 = resnet.layer4

    self.aspp = ASPP(256, 512, [4, 8, 12], drop_prob=0)
    self.avgpool = resnet.avgpool
    self.dropout = nn.Dropout(0.5)
    self.fc = nn.Linear(512, num_classes)
def __init__(self, cfg):
    super(DeeplabV3plus, self).__init__()
    self.backbone = None
    self.backbone_layers = None
    input_channel = 2048
    self.aspp = ASPP(dim_in=input_channel,
                     dim_out=cfg.MODEL_ASPP_OUTDIM,
                     rate=16 // cfg.MODEL_OUTPUT_STRIDE,
                     bn_mom=cfg.TRAIN_BN_MOM)
    self.dropout1 = nn.Dropout(0.5)
    # self.upsample4 = nn.UpsamplingBilinear2d(scale_factor=4)
    # self.upsample_sub = nn.UpsamplingBilinear2d(scale_factor=cfg.MODEL_OUTPUT_STRIDE//4)
    # self.upsample4 = nn.Upsample(scale_factor=4)
    self.upsample_sub = nn.Upsample(scale_factor=cfg.MODEL_OUTPUT_STRIDE // 4)

    indim = 256
    self.shortcut_conv = nn.Sequential(
        nn.Conv2d(indim, cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_SHORTCUT_KERNEL, 1,
                  padding=cfg.MODEL_SHORTCUT_KERNEL // 2, bias=True),
        # SynchronizedBatchNorm2d(cfg.MODEL_SHORTCUT_DIM, momentum=cfg.TRAIN_BN_MOM),
        nn.BatchNorm2d(cfg.MODEL_SHORTCUT_DIM),
        nn.ReLU(inplace=True),
    )
    self.cat_conv = nn.Sequential(
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM + cfg.MODEL_SHORTCUT_DIM, cfg.MODEL_ASPP_OUTDIM, 3, 1,
                  padding=1, bias=True),
        # SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
        nn.BatchNorm2d(cfg.MODEL_ASPP_OUTDIM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, 3, 1, padding=1, bias=True),
        # SynchronizedBatchNorm2d(cfg.MODEL_ASPP_OUTDIM, momentum=cfg.TRAIN_BN_MOM),
        nn.BatchNorm2d(cfg.MODEL_ASPP_OUTDIM),
        nn.ReLU(inplace=True),
        nn.Dropout(0.1),
    )
    self.cls_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.num_classes, 1, 1, padding=0)

    self.backbone = build_backbone(cfg.MODEL_BACKBONE, os=cfg.MODEL_OUTPUT_STRIDE)
    # self.backbone_layers = self.backbone.get_layers()
    # print(len(self.backbone_layers))
    self.pointhead = PointRendSemSegHead()

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
def __init__(self, output_stride=16, num_classes=21):
    super(DeepLab, self).__init__()
    self.backbone = resnet101(output_stride=output_stride)
    self.aspp = ASPP(output_stride)
    self.decoder = Decoder(num_classes)
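# A small usage sketch (assumed, for illustration only): forward() is taken to return per-pixel
# logits at the input resolution, the usual contract for a backbone/ASPP/decoder model like this one.
import torch

model = DeepLab(output_stride=16, num_classes=21)
model.eval()
with torch.no_grad():
    dummy = torch.randn(1, 3, 513, 513)   # one RGB image, PASCAL VOC-style 513x513 crop
    logits = model(dummy)                 # expected shape: (1, 21, 513, 513)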