Example #1
    def test_channel_prune(self):
        orig_net = resnet18(num_classes=10).to(device)
        channel_prune(orig_net)
        state_dict = torch.load(MODEL_FILE)

        orig_net = resnet18(num_classes=10).to(device)
        orig_net.load_state_dict(state_dict)
        apply_compression_results(orig_net, MASK_FILE)
        orig_net.eval()

        net = resnet18(num_classes=10).to(device)

        net.load_state_dict(state_dict)
        net.eval()

        data = torch.randn(BATCH_SIZE, 3, 128, 128).to(device)
        ms = ModelSpeedup(net, data, MASK_FILE, confidence=8)
        ms.speedup_model()
        ms.bound_model(data)

        net.eval()

        ori_sum = orig_net(data).abs().sum().item()
        speeded_sum = net(data).abs().sum().item()

        print(ori_sum, speeded_sum)
        assert (abs(ori_sum - speeded_sum) / abs(ori_sum) < RELATIVE_THRESHOLD) or \
            (abs(ori_sum - speeded_sum) < ABSOLUTE_THRESHOLD)
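The assertion above accepts the sped-up model when the summed outputs agree either relatively or absolutely. A minimal sketch of that acceptance check, with made-up threshold values standing in for the test module's RELATIVE_THRESHOLD and ABSOLUTE_THRESHOLD:

def outputs_match(orig_sum, speeded_sum, rel_threshold=0.01, abs_threshold=1e-4):
    # relative check for large magnitudes, absolute check for sums near zero;
    # the threshold defaults here are illustrative placeholders
    diff = abs(orig_sum - speeded_sum)
    return diff / abs(orig_sum) < rel_threshold or diff < abs_threshold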
Example #2
    def __init__(self,
                 emb_dim,
                 type=18,
                 fc_dim=None,
                 norm=True,
                 pretrained=True,
                 lock=False):
        super(ResNetEncoder, self).__init__()

        self.fc_dim = fc_dim
        self.norm = norm
        self.pretrained = pretrained

        if type == 18:
            if self.pretrained:
                self.backbone = nn.Sequential(
                    *list(resnet18(pretrained=True).children())[:-1])
                if lock:
                    for param in self.backbone.parameters():
                        param.requires_grad = False
                ll_size = 512
            elif fc_dim:
                self.backbone = resnet18(pretrained=False, num_classes=fc_dim)
            else:
                self.backbone = resnet18(pretrained=False, num_classes=emb_dim)
        elif type == 50:
            if self.pretrained:
                self.backbone = nn.Sequential(
                    *list(resnet50(pretrained=True).children())[:-1])
                if lock:
                    for param in self.backbone.parameters():
                        param.requires_grad = False
                ll_size = 2048
            elif fc_dim:
                self.backbone = resnet50(pretrained=False, num_classes=fc_dim)
            else:
                self.backbone = resnet50(pretrained=False, num_classes=emb_dim)

        if self.pretrained:

            if fc_dim:
                self.fc1 = nn.Linear(ll_size, fc_dim)
                self.bn1 = nn.BatchNorm1d(fc_dim)
                self.fc2 = nn.Linear(fc_dim, emb_dim)
            else:
                self.fc1 = nn.Linear(ll_size, emb_dim)
        else:
            if fc_dim:
                self.bn1 = nn.BatchNorm1d(fc_dim)
                self.fc1 = nn.Linear(fc_dim, emb_dim)
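The pretrained branch above keeps every resnet18 child except the final fc layer, so the backbone ends at the global average pool. A standalone sketch of that pattern (weights are omitted here so it runs without a download):

import torch
import torch.nn as nn
from torchvision.models import resnet18

# drop the classification head; the remaining stack ends with the global average pool
backbone = nn.Sequential(*list(resnet18().children())[:-1])
feats = backbone(torch.randn(2, 3, 224, 224))
print(feats.shape)  # torch.Size([2, 512, 1, 1]) -- flatten before attaching a new fc head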
Example #3
    def __init__(self, classes=19):
        """
        Model initialization
        :param classes: number of output classes
        :type classes: int
        """
        super().__init__()

        base = resnet.resnet18(pretrained=True)

        self.in_block = nn.Sequential(base.conv1, base.bn1, base.relu,
                                      base.maxpool)

        self.encoder1 = base.layer1
        self.encoder2 = base.layer2
        self.encoder3 = base.layer3
        self.encoder4 = base.layer4

        self.decoder1 = Decoder(64, 64, 3, 1, 1, 0)
        self.decoder2 = Decoder(128, 64, 3, 2, 1, 1)
        self.decoder3 = Decoder(256, 128, 3, 2, 1, 1)
        self.decoder4 = Decoder(512, 256, 3, 2, 1, 1)

        # Classifier
        self.tp_conv1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 32, 3, 1, 1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )
        self.tp_conv2 = nn.ConvTranspose2d(32, classes, 2, 2, 0)
Example #4
    def __init__(
        self,
        model_arch: str,
        num_input_channels: int,
        num_targets: int,
        weights_scaling: List[float],
        criterion: nn.Module,
        pretrained: bool = True,
    ) -> None:
        super().__init__()
        self.model_arch = model_arch
        self.num_input_channels = num_input_channels
        self.num_targets = num_targets
        self.register_buffer("weights_scaling", torch.tensor(weights_scaling))
        self.pretrained = pretrained
        self.criterion = criterion

        if pretrained and self.num_input_channels != 3:
            warnings.warn("There is no pre-trained model with num_in_channels != 3, first layer will be reset")

        if model_arch == "resnet18":
            self.model = resnet18(pretrained=pretrained)
            self.model.fc = nn.Linear(in_features=512, out_features=num_targets)
        elif model_arch == "resnet50":
            self.model = resnet50(pretrained=pretrained)
            self.model.fc = nn.Linear(in_features=2048, out_features=num_targets)
        else:
            raise NotImplementedError(f"Model arch {model_arch} unknown")

        if self.num_input_channels != 3:
            self.model.conv1 = nn.Conv2d(
                self.num_input_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
            )
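Only conv1 depends on the number of input channels, so replacing it (plus the fc head for the target count) is enough to reuse the architecture. A self-contained sketch of that surgery with made-up channel and target counts:

import torch
import torch.nn as nn
from torchvision.models import resnet18

num_input_channels = 5   # hypothetical, e.g. RGB plus two extra planes
num_targets = 100        # hypothetical output size

model = resnet18()
model.conv1 = nn.Conv2d(num_input_channels, 64, kernel_size=(7, 7),
                        stride=(2, 2), padding=(3, 3), bias=False)
model.fc = nn.Linear(in_features=512, out_features=num_targets)

out = model(torch.randn(2, num_input_channels, 224, 224))
print(out.shape)  # torch.Size([2, 100])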
Example #5
 def __init__(self, num_classes):
     super(CelebNet, self).__init__()
     self.num_classes = num_classes
     self.resnet = resnet18(num_classes=512)
     self.relu = nn.ReLU()
     for i in range(self.num_classes):
         self.add_module(f"seq_class_{i}", self._make_node())
Example #6
def resnet18_reid(features=128, classes=1502):
    resnet = resnet18(pretrained=True)
    resnet.layer4[0].downsample[0].stride = (1, 1)
    resnet.layer4[0].conv1.stride = (1, 1)
    resnet.fc = None
    model = ReIDResnet(resnet, 512, features, classes)
    return model
Example #7
    def __init__(
            self,
            model_file='../resvar_weights/labeled-resvar-latest.torch',
            model_file2='../resvar_weights/segmentation-network-latest.torch'):
        # You should
        #       1. create the model object
        #       2. load your state_dict
        #       3. call cuda()
        # self.model = ...
        #
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = Prototype(device, hidden_dim=1024, variational=True)

        from torchvision.models.resnet import resnet18
        from model.util import remove_backbone_head

        seg_backbone, _ = remove_backbone_head(resnet18(pretrained=False))
        self.seg_model = SegmentationNetwork(seg_backbone, 2)

        #self.seg_model = SegmentationNetwork(self.model.backbone, 3)

        # Load models
        self.model.load_state_dict(torch.load(model_file))
        self.seg_model.load_state_dict(torch.load(model_file2))

        self.model.eval()
        self.seg_model.eval()

        self.model.to(device)
        self.seg_model.to(device)
Example #8
    def __init__(self, cfg: Dict):
        super().__init__()

        self.backbone = resnet18(pretrained=True, progress=True)

        num_history_channels = (cfg["model_params"]["history_num_frames"] +
                                1) * 2
        num_in_channels = 3 + num_history_channels

        self.backbone.conv1 = nn.Conv2d(
            num_in_channels,
            self.backbone.conv1.out_channels,
            kernel_size=self.backbone.conv1.kernel_size,
            stride=self.backbone.conv1.stride,
            padding=self.backbone.conv1.padding,
            bias=False,
        )

        # This is 512 for resnet18 and resnet34;
        # And it is 2048 for the other resnets
        backbone_out_features = 512

        # X, Y coords for the future positions (output shape: Bx50x2)
        num_targets = 2 * cfg["model_params"]["future_num_frames"]

        # You can add more layers here.
        self.head = nn.Sequential(
            # nn.Dropout(0.2),
            nn.Linear(in_features=backbone_out_features, out_features=4096), )

        self.logit = nn.Linear(4096, out_features=num_targets)
Example #9
    def __init__(self, im_size, hidden_dim, n_classes):
        '''
        Create components of a two layer neural net classifier (often
        referred to as an MLP) and initialize their weights.

        Arguments:
            im_size (tuple): A tuple of ints with (channels, height, width)
            hidden_dim (int): Number of hidden activations to use
            n_classes (int): Number of classes to score
        '''
        super(ResnetTwoLayerNN, self).__init__()
        #############################################################################
        # TODO: Initialize anything you need for the forward pass		    #
        #############################################################################
        self.model = resNet.resnet18(pretrained=True)
        for param in self.model.parameters():
            param.requires_grad = False

        self.model.fc = nn.Sequential(
            nn.Linear(512, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, n_classes),
        )
        self.softmax = nn.Softmax(dim=1)
        self.upsample_image = nn.Upsample(scale_factor=7,
                                          mode="bilinear",
                                          align_corners=True)
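The pattern here is feature extraction with a frozen backbone: every pretrained parameter stops receiving gradients and only the replacement fc head is trained. A small sketch of that recipe (weights omitted and layer sizes made up so it runs offline):

import torch.nn as nn
from torchvision.models import resnet18

model = resnet18()
for param in model.parameters():
    param.requires_grad = False          # freeze the backbone

model.fc = nn.Sequential(                # new head; its parameters stay trainable
    nn.Linear(512, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)

trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # only the fc.* parameters remain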
Example #10
    def __init__(self, n_classes=21):
        """
        Initialization
        """
        super(InstrumentsMFF, self).__init__()
        self.spp = spatialPyramidPooling(pool_sizes=[16, 8, 4, 2])
        self.mff_sub24_x2 = multiresolutionFeatureFusion(n_classes, 128, 512, 512, with_bn=True )
        base = resnet.resnet18(pretrained=True)
        self.in_block = nn.Sequential(base.conv1, base.bn1, base.relu, base.maxpool)
        self.encoder1 = base.layer1
        self.encoder2 = base.layer2
        self.encoder3 = base.layer3
        self.encoder4 = base.layer4

        # decoder
        self.decoder1 = Decoder(64, 64, 3, 1, 1, 0)
        self.decoder2 = Decoder(128, 64, 3, 2, 1, 1)
        self.decoder3 = Decoder(256, 128, 3, 2, 1, 1)
        self.decoder4 = Decoder(512, 256, 3, 2, 1, 1)
        self.decoder4_x2 = Decoder(512, 128, 3, 2, 1, 1)

        # Classifier
        self.deconv1 = nn.Sequential(nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),
                                      nn.BatchNorm2d(32),
                                      nn.ReLU(inplace=True), )
        self.conv2 = nn.Sequential(nn.Conv2d(32, 32, 3, 1, 1),
                                   nn.BatchNorm2d(32),
                                   nn.ReLU(inplace=True), )
        self.conv2_x2 = nn.Sequential(nn.Conv2d(128, 32, 3, 1, 1),
                                      nn.BatchNorm2d(32),
                                      nn.ReLU(inplace=True), )
        self.deconv2 = nn.ConvTranspose2d(32, n_classes, 2, 2, 0)
        self.lsm = nn.LogSoftmax(dim=1)
Example #11
 def __init__(self, pretrained=None):
     super(HeadCommon, self).__init__()
     self.config = ConfigParser()
     config_path = os.path.abspath(
         os.path.join(__file__, "../../", "config.ini"))
     assert os.path.exists(config_path), "config.ini not exists!"
     self.config.read(config_path)
     self.backbone_type = self.config['BACKBONE']['BACKBONE_TYPE']
     _pretrained = True if pretrained is not None else False
     assert self.backbone_type in ['resnet', 'vgg16']
     if self.backbone_type == 'resnet':
         resnet_layer = int(self.config['BACKBONE']['RESNET_LAYER'])
         assert resnet_layer in [18, 34, 50, 101, 152]
         if resnet_layer == 18:
             _resnet = resnet.resnet18(_pretrained)
         elif resnet_layer == 34:
             _resnet = resnet.resnet34(_pretrained)
         elif resnet_layer == 50:
             _resnet = resnet.resnet50(_pretrained)
         elif resnet_layer == 101:
             _resnet = resnet.resnet101(_pretrained)
         else:
             _resnet = resnet.resnet152(_pretrained)
         # use layer4 (resnet_c5), the last bottleneck stage of the resnet
         _resnet.layer4[0].conv2.stride = (1, 1)
         _resnet.layer4[0].downsample[0].stride = (1, 1)
         self.resnet_c5 = _resnet.layer4
         self.resnet_c5_avg = _resnet.avgpool
     elif self.backbone_type == 'vgg16':
         assert not bool(int(self.config['HEAD']['MASK_HEAD_ON'])), (
             "When mask head on, not support vgg16 backbone.")
         vgg = vgg16(pretrained=True)
         self.vgg_fc = nn.Sequential(
             *list(vgg.classifier._modules.values())[:-1])
Example #12
    def __init__(self, cfg, encoder='resnet18', upsample=True, device=None):
        super(TRecgNet_Upsample_Resiual, self).__init__()

        self.encoder = encoder
        self.cfg = cfg
        self.upsample = upsample
        self.dim_noise = 128
        self.device = device
        self.avg_pool_size = 14

        dims = [32, 64, 128, 256, 512, 1024, 2048]

        if cfg.PRETRAINED == 'imagenet' or cfg.PRETRAINED == 'place':
            pretrained = True
        else:
            pretrained = False

        if cfg.PRETRAINED == 'place':
            resnet = models.__dict__['resnet18'](num_classes=365)
            # places model downloaded from http://places2.csail.mit.edu/
            checkpoint = torch.load(self.cfg.CONTENT_MODEL_PATH,
                                    map_location=lambda storage, loc: storage)
            state_dict = {
                str.replace(k, 'module.', ''): v
                for k, v in checkpoint['state_dict'].items()
            }
            resnet.load_state_dict(state_dict)
            print('place resnet18 loaded....')
        else:
            resnet = resnet18(pretrained=pretrained)
            print('{0} pretrained:{1}'.format(encoder, str(pretrained)))

        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool  # 1/4
        self.layer1 = resnet.layer1  # 1/4
        self.layer2 = resnet.layer2  # 1/8
        self.layer3 = resnet.layer3  # 1/16
        self.layer4 = resnet.layer4  # 1/32

        self.build_upsample_layers(dims)

        self.avgpool = nn.AvgPool2d(self.avg_pool_size, 1)
        self.fc = nn.Linear(dims[4], cfg.NUM_CLASSES)

        if pretrained and upsample:

            init_weights(self.up1, 'normal')
            init_weights(self.up2, 'normal')
            init_weights(self.up3, 'normal')
            init_weights(self.up4, 'normal')
            init_weights(self.skip_3, 'normal')
            init_weights(self.skip_2, 'normal')
            init_weights(self.skip_1, 'normal')
            init_weights(self.up_image, 'normal')

        elif not pretrained:

            init_weights(self, 'normal')
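Checkpoints saved from an nn.DataParallel-wrapped model prefix every key with 'module.', which is why the snippet rewrites the keys before load_state_dict. A self-contained sketch of just that step; the file name is made up, whereas the real checkpoint comes from cfg.CONTENT_MODEL_PATH:

import torch
import torch.nn as nn
from torchvision.models import resnet18

# simulate a checkpoint written from an nn.DataParallel-wrapped model
wrapped = nn.DataParallel(resnet18(num_classes=365))
torch.save({'state_dict': wrapped.state_dict()}, 'places_ckpt.pth.tar')

# saved keys look like 'module.conv1.weight'; strip the prefix before loading
checkpoint = torch.load('places_ckpt.pth.tar',
                        map_location=lambda storage, loc: storage)
state_dict = {k.replace('module.', ''): v
              for k, v in checkpoint['state_dict'].items()}

model = resnet18(num_classes=365)
model.load_state_dict(state_dict)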
Example #13
    def __init__(self, im_size, hidden_dim, kernel_size, n_classes):
        '''
        Create components of a CNN classifier and initialize their weights.

        Arguments:
            im_size (tuple): A tuple of ints with (channels, height, width)
            hidden_dim (int): Number of hidden activations to use
            kernel_size (int): Width and height of (square) convolution filters
            n_classes (int): Number of classes to score
        '''

        super(ONLYRESNET, self).__init__()
        #############################################################################
        # TODO: Initialize anything you need for the forward pass
        #############################################################################

        import torchvision.models as models

        #Begin of New Code

        self.resnet18_full = resnet18(pretrained=True)

        #self.resnet18 = nn.Sequential(*list(self.resnet18_full.children())[:-1])

        #for param in self.resnet18.parameters():
        #        param.requires_grad= False

        #self.upsample_img    = nn.Upsample(scale_factor=7, mode="bilinear", align_corners=True)
        #End of New Code

        pass
Example #14
    def __init__(self, feature_dim=128, resnet_depth=18):
        super(Model, self).__init__()

        self.f = []
        if resnet_depth == 18:
            my_resnet = resnet18()
            resnet_output_dim = 512
        elif resnet_depth == 34:
            my_resnet = resnet34()
            resnet_output_dim = 512
        elif resnet_depth == 50:
            my_resnet = resnet50()
            resnet_output_dim = 2048

        for name, module in my_resnet.named_children():
            if name == 'conv1':
                module = nn.Conv2d(3,
                                   64,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
            if not isinstance(module, nn.Linear) and not isinstance(
                    module, nn.MaxPool2d):
                self.f.append(module)
        # encoder
        self.f = nn.Sequential(*self.f)
        # projection head
        self.g = nn.Sequential(nn.Linear(resnet_output_dim, 512, bias=False),
                               nn.BatchNorm1d(512), nn.ReLU(inplace=True),
                               nn.Linear(512, feature_dim, bias=True))
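The loop above adapts resnet18 to small images: conv1 becomes a 3x3, stride-1 convolution and the max-pool and fc layers are dropped, so the encoder still produces a 512-dimensional feature even for CIFAR-sized inputs. A sketch of just that encoder and its output shape:

import torch
import torch.nn as nn
from torchvision.models import resnet18

layers = []
for name, module in resnet18().named_children():
    if name == 'conv1':
        module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
    if not isinstance(module, (nn.Linear, nn.MaxPool2d)):
        layers.append(module)
encoder = nn.Sequential(*layers)

feats = torch.flatten(encoder(torch.randn(4, 3, 32, 32)), start_dim=1)
print(feats.shape)  # torch.Size([4, 512])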
Example #15
 def __init__(self, rnn_hidden=256, out_size=12):
     super(ConvNet, self).__init__()
     self.rnn_hidden = rnn_hidden
     self.num_layers = 4
     self.out_size = out_size
     self.lstm_input_size = out_size
     self.encoder = resnet18(pretrained=True)
     # Disable grad
     # for param in self.encoder.parameters():
     #     param.requires_grad = False
     # for param in self.encoder.layer4.parameters():
     #     param.requires_grad = True
     self.encoder.fc = torch.nn.Linear(self.encoder.fc.in_features, 512)
     self.cnn2h0 = torch.nn.Sequential(
         torch.nn.Dropout(0.4),
         torch.nn.Linear(512, self.num_layers * self.rnn_hidden))
     self.cnn2c0 = torch.nn.Sequential(
         torch.nn.Dropout(0.4),
         torch.nn.Linear(512, self.num_layers * self.rnn_hidden))
     self.lstm = torch.nn.LSTM(input_size=self.lstm_input_size,
                               hidden_size=rnn_hidden,
                               num_layers=self.num_layers,
                               dropout=0.4,
                               batch_first=True)
     self.linear = torch.nn.Sequential(
         torch.nn.Linear(rnn_hidden, rnn_hidden), torch.nn.Dropout(0.4),
         torch.nn.Linear(rnn_hidden, out_size))
     self.hidden_w = None
Example #16
def load_net(net_name, pretrained_path=None, zoo_pretrained=False):
    if net_name == 'resnet18':
        net = resnet18(zoo_pretrained, num_classes=1108)
        net.conv1 = nn.Conv2d(6,
                              64,
                              kernel_size=7,
                              stride=2,
                              padding=3,
                              bias=False)

    elif net_name == 'resnet50':
        net = resnet50(zoo_pretrained, num_classes=1108)
        net.conv1 = nn.Conv2d(6,
                              64,
                              kernel_size=7,
                              stride=2,
                              padding=3,
                              bias=False)

    elif net_name == 'mobilenet':
        raise NotImplementedError

    elif net_name == 'vggnet':
        raise NotImplementedError

    else:
        raise ValueError("invalid net_name : {}".format(net_name))

    if pretrained_path is not None:
        net.load_state_dict(torch.load(pretrained_path))
        print("pretrained {} weights will be used".format(pretrained_path))

    return net
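A hedged usage sketch for load_net above, assuming a torchvision version that still accepts the positional pretrained flag exactly as the snippet does; note the input must now carry six channels:

import torch

net = load_net('resnet18')                 # zoo_pretrained=False, no local weights
out = net(torch.randn(2, 6, 224, 224))     # conv1 was rebuilt for 6-channel input
print(out.shape)                           # torch.Size([2, 1108])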
Example #17
def get_classification_model(classifier_name='resnet18',
                             num_classes=1000,
                             pretrained=True):
    """
    Get the detection model
    :param pretrained:
    :param classifier_name:
    :param num_classes:
    :return:
    """
    if classifier_name == 'resnet18':
        model = resnet18(pretrained, num_classes=num_classes)

    elif classifier_name == 'resnet34':
        model = resnet34(pretrained, num_classes=num_classes)

    elif classifier_name == 'resnet50':
        model = resnet50(pretrained, num_classes=num_classes)

    elif classifier_name == 'resnet101':
        model = resnet101(pretrained, num_classes=num_classes)

    elif classifier_name == 'resnet152':
        model = resnet152(pretrained, num_classes=num_classes)

    else:
        raise ValueError('Unsupported resnet type.')

    return model
Example #18
def test_int8():
    from torchvision.models.resnet import resnet18
    from calibration import ImageCalibrator

    imgs_dir = '/mnt/data/sample_comparison/sample_comp/train_ori'
    images = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)[:3]]
    calibrator = ImageCalibrator(images,
                                 width=256,
                                 height=256,
                                 channel=3,
                                 batch_size=1,
                                 cache_file='./{}.cache'.format('int8'))

    engine_name = 'model_i8.engine'
    input_names = ['input']
    output_names = ['output']
    print('Build Engine Model.')
    batch_size = 1
    x = torch.ones((batch_size, 3, 256, 256)).cuda()
    model = resnet18(num_classes=5).cuda().eval()
    input_names, output_names = build_tensorRT_model(model, [x],
                                                     input_names=input_names,
                                                     output_names=output_names,
                                                     int8_mode=True,
                                                     calibrator=calibrator,
                                                     engine_name=engine_name)
    model = load_tensorRT_model(engine_name, input_names, output_names)
    model(x)
Example #19
    def __init__(self, feature_dim=128):
        super(TEST_MODEL, self).__init__()

        self.f = []
        for name, module in resnet18().named_children():
            if name == 'conv1':
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            if not isinstance(module, nn.Linear) and not isinstance(module, nn.MaxPool2d):
                self.f.append(module)
        # encoder
        self.f = nn.Sequential(*self.f)
        # projection mlp
        self.projection = nn.Sequential(*[nn.Linear(512, 2048, bias=True), 
                  nn.BatchNorm1d(2048),
                  nn.ReLU(inplace=True), 
                  nn.Linear(2048, 2048, bias=True), 
                  nn.BatchNorm1d(2048),
                  nn.ReLU(inplace=True), 
                  nn.Linear(2048, feature_dim, bias=True),
                  nn.BatchNorm1d(feature_dim)])
        # prediction mlp
        self.h = nn.Sequential(
                  nn.Linear(feature_dim, 512, bias=True), 
                  nn.BatchNorm1d(512),
                  nn.ReLU(inplace=True), 
                  nn.Linear(512, feature_dim, bias=True), 
        )
Example #20
    def __init__(self,
                 arch="resnet18",
                 num_classes=1000,
                 torchvision_pretrained=False,
                 pretrained_num_classes=1000,
                 fix_bn=False,
                 partial_bn=False,
                 zero_init_residual=False):
        super(TorchvisionResNet, self).__init__()

        self.num_classes = num_classes
        self.fix_bn = fix_bn
        self.partial_bn = partial_bn

        if arch == 'resnet18':
            self.model = resnet18(pretrained=torchvision_pretrained,
                                  num_classes=pretrained_num_classes,
                                  zero_init_residual=zero_init_residual)
        elif arch == 'resnet50':
            self.model = resnet50(pretrained=torchvision_pretrained,
                                  num_classes=pretrained_num_classes,
                                  zero_init_residual=zero_init_residual)
        elif arch == 'resnext50_32x4d':
            self.model = resnext50_32x4d(pretrained=torchvision_pretrained,
                                         num_classes=pretrained_num_classes,
                                         zero_init_residual=zero_init_residual)
        else:
            raise ValueError('no such value')

        self.init_weights(num_classes, pretrained_num_classes)
Example #21
    def __init__(self, backbone='resnet50', pretrained_path=None):
        super().__init__()
        if backbone == 'resnet18':
            backbone = resnet18(pretrained=not pretrained_path)
            self.final_out_channels = 256
            self.low_level_inplanes = 64
        elif backbone == 'resnet34':
            backbone = resnet34(pretrained=not pretrained_path)
            self.final_out_channels = 256
            self.low_level_inplanes = 64
        elif backbone == 'resnet50':
            backbone = resnet50(pretrained=not pretrained_path)
            self.final_out_channels = 1024
            self.low_level_inplanes = 256
        elif backbone == 'resnet101':
            backbone = resnet101(pretrained=not pretrained_path)
            self.final_out_channels = 1024
            self.low_level_inplanes = 256
        else:  # backbone == 'resnet152':
            backbone = resnet152(pretrained=not pretrained_path)
            self.final_out_channels = 1024
            self.low_level_inplanes = 256
        if pretrained_path:
            backbone.load_state_dict(torch.load(pretrained_path))


        self.early_extractor = nn.Sequential(*list(backbone.children())[:5])
        self.later_extractor = nn.Sequential(*list(backbone.children())[5:7])

        conv4_block1 = self.later_extractor[-1][0]

        conv4_block1.conv1.stride = (1, 1)
        conv4_block1.conv2.stride = (1, 1)
        conv4_block1.downsample[0].stride = (1, 1)
Example #22
    def __init__(self, backbone='resnet50', backbone_path=None):
        super().__init__()
        if backbone == 'resnet18':
            backbone = resnet18(pretrained=not backbone_path)
            self.out_channels = [256, 512, 512, 256, 256, 128]
        elif backbone == 'resnet34':
            backbone = resnet34(pretrained=not backbone_path)
            self.out_channels = [256, 512, 512, 256, 256, 256]
        elif backbone == 'resnet50':
            backbone = resnet50(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        elif backbone == 'resnet101':
            backbone = resnet101(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        else:  # backbone == 'resnet152':
            backbone = resnet152(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        if backbone_path:
            backbone.load_state_dict(torch.load(backbone_path))

        for name, parameter in backbone.named_parameters():
            if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        self.feature_extractor = nn.Sequential(*list(backbone.children())[:7])

        conv4_block1 = self.feature_extractor[-1][0]

        conv4_block1.conv1.stride = (1, 1)
        conv4_block1.conv2.stride = (1, 1)
        conv4_block1.downsample[0].stride = (1, 1)
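children()[:7] keeps the stem plus layer1-layer3, and forcing stride (1, 1) in the first block of layer3 removes the last downsampling, so a 300x300 input (the SSD300 resolution) yields a 38x38 map instead of 19x19. A sketch that checks the resulting shape for the resnet18 branch:

import torch
import torch.nn as nn
from torchvision.models import resnet18

backbone = resnet18()
feature_extractor = nn.Sequential(*list(backbone.children())[:7])  # stem + layer1-3

conv4_block1 = feature_extractor[-1][0]        # first BasicBlock of layer3
conv4_block1.conv1.stride = (1, 1)
conv4_block1.conv2.stride = (1, 1)             # already (1, 1) in a BasicBlock
conv4_block1.downsample[0].stride = (1, 1)

feature_extractor.eval()
with torch.no_grad():
    feats = feature_extractor(torch.randn(1, 3, 300, 300))
print(feats.shape)  # torch.Size([1, 256, 38, 38])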
Example #23
 def __init__(self, num_output):
     super().__init__()
     # self.resizing = nn.UpsamplingBilinear2d(size=(224, 224))
     # self.feature_extractor = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
     self.feature_extractor = resnet18(norm_layer=Norm_Layer)
     self.feature_extractor.fc =nn.Linear(self.feature_extractor.fc.in_features, num_output)
     self.apply(init_layers)
Example #24
    def __init__(self, backbone, num_classes=21):
        super().__init__()
        if backbone == 'resnet-18':
            self.backbone = resnet18()
            self.in_channel = [512, 128, 64, 64]
            self.out_channel = [256, 64, 64, 64]
        elif backbone == 'resnet-34':
            self.backbone = resnet34()
            self.in_channel = [512, 128, 64, 64]
            self.out_channel = [256, 64, 64, 64]
        elif backbone == 'resnet-50':
            self.backbone = resnet50()
            self.in_channel = [2048, 512, 256, 64]
            self.out_channel = [1024, 256, 64, 64]
        elif backbone == 'resnet-101':
            self.backbone = resnet101()
            self.in_channel = [2048, 512, 256, 64]
            self.out_channel = [1024, 256, 64, 64]
        elif backbone == 'resnet-152':
            self.backbone = resnet152()
            self.in_channel = [2048, 512, 256, 64]
            self.out_channel = [1024, 256, 64, 64]
        else:
            raise NotImplementedError

        self.encoder = Encoder(self.backbone)
        self.decoder = Decoder(self.in_channel, self.out_channel)
        self.out = nn.Conv2d(64, num_classes, 1)

        self._init_weight()
Example #25
 def __init__(self):
     super(YOLO_v1, self).__init__()
     resnet = resnet18(pretrained=True)  # load torchvision's pretrained resnet18 model
     resnet_out_channel = resnet.fc.in_features  # channels entering resnet's fc layer, so the conv layers below can attach to it
     self.resnet = nn.Sequential(*list(
         resnet.children())[:-2])  # drop resnet's last two layers (avgpool and fc)
     # the last four convolutional layers of YOLOv1
     self.Conv_layers = nn.Sequential(
         nn.Conv2d(resnet_out_channel, 1024, 3, padding=1),
         nn.BatchNorm2d(1024),  # BN added to speed up training; the original YOLOv1 paper does not use it
         nn.LeakyReLU(),
         nn.Conv2d(1024, 1024, 3, stride=2, padding=1),
         nn.BatchNorm2d(1024),
         nn.LeakyReLU(),
         nn.Conv2d(1024, 1024, 3, padding=1),
         nn.BatchNorm2d(1024),
         nn.LeakyReLU(),
         nn.Conv2d(1024, 1024, 3, padding=1),
         nn.BatchNorm2d(1024),
         nn.LeakyReLU(),
     )
     # the last two fully connected layers of YOLOv1
     self.Conn_layers = nn.Sequential(
         nn.Linear(7 * 7 * 1024, 4096),
         nn.LeakyReLU(),
         nn.Linear(4096, 7 * 7 * 30),
         nn.Sigmoid()  # sigmoid maps every output into (0, 1); negative or very large values would complicate the loss computation
     )
Example #26
    def __init__(self, backbone="resnet50", backbone_path=None):
        super().__init__()
        if backbone == "resnet18":
            backbone = resnet18(pretrained=not backbone_path)
            self.out_channels = [256, 512, 512, 256, 256, 128]
        elif backbone == "resnet34":
            backbone = resnet34(pretrained=not backbone_path)
            self.out_channels = [256, 512, 512, 256, 256, 256]
        elif backbone == "resnet50":
            backbone = resnet50(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        elif backbone == "resnet101":
            backbone = resnet101(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        else:
            backbone = resnet152(pretrained=not backbone_path)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        if backbone_path:
            backbone.load_state_dict(torch.load(backbone_path))

        self.feature_extractor = nn.Sequential(*list(backbone.children())[:7])

        conv4_block1 = self.feature_extractor[-1][0]

        conv4_block1.conv1.stride = (1, 1)
        conv4_block1.conv2.stride = (1, 1)
        conv4_block1.downsample[0].stride = (1, 1)
Example #27
 def __init__(self, chan_in=3, chan_out=64, pretrained=True):
     super().__init__()
     resnet = resnet18(pretrained=pretrained)
     self.inconv = ConvBlock(chan_in, 64, k=3)
     self.layer1 = resnet.layer1
     self.layer2 = resnet.layer1
     self.outconv = ConvBlock(64, chan_out, k=1, activation=False)
Example #28
    def __init__(self, inC, outC):
        super(BevEncode, self).__init__()

        trunk = resnet18(pretrained=False, zero_init_residual=True)
        self.conv1 = nn.Conv2d(inC,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = trunk.bn1
        self.relu = trunk.relu

        self.layer1 = trunk.layer1
        self.layer2 = trunk.layer2
        self.layer3 = trunk.layer3

        self.up1 = Up(64 + 256, 256, scale_factor=4)
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(256, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, outC, kernel_size=1, padding=0),
        )
Example #29
    def __init__(self, cin, cout,
                 with_skip, dropout_p):
        super(CNN, self).__init__()

        self.cin = cin
        self.cout = cout
        self.trunk = resnet18(pretrained=False, zero_init_residual=True)
        self.trunk.conv1 = nn.Conv2d(cin, 64, kernel_size=7,
                                     stride=2, padding=3,
                                     bias=False)
        self.downscale = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, stride=2,
                      padding=1, bias=False),
            )
        self.upscale0 = gen_up(512, 512, scale=2)
        self.upscale1 = gen_up(512, 256, scale=4)

        if with_skip:
            self.upscale2 = gen_up(256+128, 256, scale=2)
            self.upscale3 = gen_up(256+64, 128, scale=4)
        else:
            self.upscale2 = gen_up(256, 256, scale=2)
            self.upscale3 = gen_up(256, 128, scale=4)
        self.with_skip = with_skip
        self.final = nn.Conv2d(128, cout, 1)
        self.dropout = nn.Dropout(p=dropout_p, inplace=False)
Example #30
    def __init__(self, backbone='resnet50'):
        super().__init__()
        if backbone == 'resnet18':
            backbone = resnet18(pretrained=True)
            self.out_channels = [256, 512, 512, 256, 256, 128]
        elif backbone == 'resnet34':
            backbone = resnet34(pretrained=True)
            self.out_channels = [256, 512, 512, 256, 256, 256]
        elif backbone == 'resnet50':
            backbone = resnet50(pretrained=True)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        elif backbone == 'resnet101':
            backbone = resnet101(pretrained=True)
            self.out_channels = [1024, 512, 512, 256, 256, 256]
        else:  # backbone == 'resnet152':
            backbone = resnet152(pretrained=True)
            self.out_channels = [1024, 512, 512, 256, 256, 256]


        self.feature_extractor = nn.Sequential(*list(backbone.children())[:7])

        conv4_block1 = self.feature_extractor[-1][0]

        conv4_block1.conv1.stride = (1, 1)
        conv4_block1.conv2.stride = (1, 1)
        conv4_block1.downsample[0].stride = (1, 1)
Example #31
def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
    if 'test_time_pool' in kwargs:
        test_time_pool = kwargs.pop('test_time_pool')
    else:
        test_time_pool = True
    if 'extra' in kwargs:
        extra = kwargs.pop('extra')
    else:
        extra = True
    if model_name == 'dpn68':
        model = dpn68(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn68b':
        model = dpn68b(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn92':
        model = dpn92(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool, extra=extra)
    elif model_name == 'dpn98':
        model = dpn98(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn131':
        model = dpn131(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn107':
        model = dpn107(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'resnet18':
        model = resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet34':
        model = resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet50':
        model = resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet101':
        model = resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet152':
        model = resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet121':
        model = densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet161':
        model = densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet169':
        model = densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet201':
        model = densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'inception_v3':
        model = inception_v3(
            num_classes=num_classes, pretrained=pretrained, transform_input=False, **kwargs)
    else:
        assert False, "Unknown model architecture (%s)" % model_name
    return model
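The long if/elif dispatch above can also be written as a lookup table. A minimal sketch restricted to the torchvision resnets (the dpn*, densenet* and inception_v3 constructors in the original come from modules not shown here); extra keyword arguments such as the legacy pretrained flag are forwarded unchanged:

from torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152

_RESNET_FACTORIES = {
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet50': resnet50,
    'resnet101': resnet101,
    'resnet152': resnet152,
}

def create_resnet(model_name, num_classes=1000, **kwargs):
    if model_name not in _RESNET_FACTORIES:
        raise ValueError("Unknown model architecture (%s)" % model_name)
    return _RESNET_FACTORIES[model_name](num_classes=num_classes, **kwargs)

model = create_resnet('resnet18', num_classes=10)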