def __init__(self, num_classes=2):
     
     super(Resnet34_8s, self).__init__()
     
     
     # Load the pretrained weights, remove the avg pool
     # layer and keep the backbone output stride of 32
     # (the score heads below tap features at strides 8/16/32)
     resnet34_32s = resnet.resnet34(fully_conv=True,
                                                pretrained=True,
                                                output_stride=32,
                                                remove_avg_pool_layer=True)
     
     resnet_block_expansion_rate = resnet34_32s.layer1[0].expansion
     
     # Replace the fully connected layer with an empty
     # Sequential (identity) -- we don't need logits here
     resnet34_32s.fc = nn.Sequential()
     
     self.resnet34_32s = resnet34_32s
     
     self.score_32s = nn.Conv2d(512 *  resnet_block_expansion_rate,
                                num_classes,
                                kernel_size=1)
     
     self.score_16s = nn.Conv2d(256 *  resnet_block_expansion_rate,
                                num_classes,
                                kernel_size=1)
     
     self.score_8s = nn.Conv2d(128 *  resnet_block_expansion_rate,
                                num_classes,
                                kernel_size=1)
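
A minimal sketch of the forward pass this head layout implies (FCN-style fusion of the stride-8/16/32 score maps), assuming the backbone exposes the usual conv1/bn1/relu/maxpool and layer1-4 attributes; this is an illustration, not necessarily the author's exact code:

def forward(self, x):
    input_spatial_dim = x.size()[2:]

    x = self.resnet34_32s.conv1(x)
    x = self.resnet34_32s.bn1(x)
    x = self.resnet34_32s.relu(x)
    x = self.resnet34_32s.maxpool(x)

    x = self.resnet34_32s.layer1(x)
    x = self.resnet34_32s.layer2(x)
    logits_8s = self.score_8s(x)      # stride-8 features -> class scores

    x = self.resnet34_32s.layer3(x)
    logits_16s = self.score_16s(x)    # stride 16

    x = self.resnet34_32s.layer4(x)
    logits_32s = self.score_32s(x)    # stride 32

    # upsample coarser scores and add them to the finer ones
    logits_16s = logits_16s + nn.functional.interpolate(
        logits_32s, size=logits_16s.size()[2:], mode='bilinear', align_corners=False)
    logits_8s = logits_8s + nn.functional.interpolate(
        logits_16s, size=logits_8s.size()[2:], mode='bilinear', align_corners=False)

    # upsample the fused stride-8 scores back to the input resolution
    return nn.functional.interpolate(
        logits_8s, size=input_spatial_dim, mode='bilinear', align_corners=False)
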
 def __init__(self):
     super(PosNet, self).__init__()
     self.resnet = resnet.resnet34(pretrained=False)
     self.fc_1 = nn.Linear(2048, 1024)
     self.fc_2 = nn.Linear(1024, YAW_ROLL_BIN * 4)
     # fc for layout
     self.fc_layout = nn.Linear(2048, 2048)
     # fc for layout orientation
     self.fc_3 = nn.Linear(2048, 1024)
     self.fc_4 = nn.Linear(1024, LAYOUT_ORI_BIN * 2)
     # fc for layout centroid
     self.fc_5 = nn.Linear(2048, 1024)
     self.fc_6 = nn.Linear(1024, 6)
     for m in self.modules():
         # if isinstance(m, nn.Conv2d):
         #     n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
         #     m.weight.data.normal_(0, math.sqrt(2. / n))
          # elif isinstance(m, nn.BatchNorm2d):
         #     m.weight.data.fill_(1)
         #     m.bias.data.zero_()
         if isinstance(m, nn.Linear):
             m.weight.data.normal_(0, 0.01)
             m.bias.data.zero_()
     pretrained_dict = model_zoo.load_url(model_urls['resnet34'])
     model_dict = self.resnet.state_dict()
     pretrained_dict = {
         k: v
         for k, v in pretrained_dict.items() if k in model_dict
     }
     # for _, v in pretrained_dict.items():
     #     v.requires_grad = False
     model_dict.update(pretrained_dict)
     self.resnet.load_state_dict(model_dict)
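
The pretrained-weight loading above (filter by matching keys, merge, then load) is a common partial-load pattern; a standalone sketch of the same idea, with a hypothetical helper name:

import torch.utils.model_zoo as model_zoo

def load_matching_weights(model, url):
    # keep only pretrained tensors whose names exist in the target model,
    # merge them into the model's state dict, then load the merged dict
    pretrained_dict = model_zoo.load_url(url)
    model_dict = model.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
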
Example #3
def _get_resnet(resnet_type="18"):
    if resnet_type == "18":
        return resnet.resnet18()
    elif resnet_type == "34":
        return resnet.resnet34()
    else:
        raise ValueError(f"Unsupported resnet_type: {resnet_type}")
 def __init__(self):
     super(Bdb3dNet, self).__init__()
     # To keep the same spatial size after conv2d with stride=1, use padding=(kernel_size-1)/2
     self.resnet = resnet.resnet34(pretrained=False)
     self.fc1 = nn.Linear(2048, 256)
     self.fc2 = nn.Linear(256, NUM_CLASS * 4)
     self.fc3 = nn.Linear(2048, 256)
     self.fc4 = nn.Linear(256, OBJ_ORI_BIN * 2)
     self.fc5 = nn.Linear(2048, 256)
     self.fc_centroid = nn.Linear(256, OBJ_CENTER_BIN * 2)
     self.fc_off_1 = nn.Linear(2048, 256)
     self.fc_off_2 = nn.Linear(256, 2)
     for m in self.modules():
         if isinstance(m, nn.Linear):
             m.weight.data.normal_(0, 0.01)
             m.bias.data.zero_()
     pretrained_dict = model_zoo.load_url(model_urls['resnet34'])
     model_dict = self.resnet.state_dict()
     pretrained_dict = {
         k: v
         for k, v in pretrained_dict.items() if k in model_dict
     }
     # for _, v in pretrained_dict.items():
     #     v.requires_grad = False
     model_dict.update(pretrained_dict)
     self.resnet.load_state_dict(model_dict)
Example #5
    def __init__(self, feature_size, n_classes):
        # Network architecture
        super(iCaRLNet, self).__init__()
        self.feature_extractor = resnet34()
        self.feature_extractor.fc =\
            nn.Linear(self.feature_extractor.fc.in_features, feature_size)
        self.bn = nn.BatchNorm1d(feature_size, momentum=0.01)
        self.ReLU = nn.ReLU()
        self.fc = nn.Linear(feature_size, n_classes, bias=False)
        self.feature_extractor = nn.DataParallel(self.feature_extractor)
        self.n_classes = n_classes
        self.n_known = 0

        # List containing exemplar_sets
        # Each exemplar_set is a np.array of N images
        # with shape (N, C, H, W)
        self.exemplar_sets = []

        # Learning method
        self.cls_loss = nn.CrossEntropyLoss()
        self.dist_loss = nn.BCELoss()
        self.optimizer = optim.Adam(self.parameters(),
                                    lr=learning_rate,
                                    weight_decay=0.00001)
        #self.optimizer = optim.SGD(self.parameters(), lr=2.0,
        #                           weight_decay=0.00001)

        # Means of exemplars
        self.compute_means = True
        self.exemplar_means = []
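
A minimal sketch of the forward pass these layers suggest (feature extractor, batch norm, ReLU, then the bias-free class weights); this is an assumption about the wiring, not the author's verified code:

    def forward(self, x):
        x = self.feature_extractor(x)
        x = self.bn(x)
        x = self.ReLU(x)
        x = self.fc(x)
        return x
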
Example #6
    def __init__(self):
        super(Foreground_Generator, self).__init__()
        self.extractor = resnet34()
        nc, nz, ngf = 1, 256, 64
        self.generator = nn.Sequential(
            # input is Z, going into a convolution
            # state size. nz x 24 x 24
            nn.ConvTranspose2d(nz, ngf * 2, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 27 x 27
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 30 x 30
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Sigmoid()
            # state size. (nc) x 60 x 60
        )

        for m in self.modules():
            if isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
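
The "state size" comments follow from the transposed-convolution formula out = (in - 1) * stride - 2 * padding + kernel_size: 24 -> 27 -> 30 -> 60. A quick sanity check of the generator stack on its own (the 24x24 input size is taken from the comments above):

import torch
import torch.nn as nn

nc, nz, ngf = 1, 256, 64
gen = nn.Sequential(
    nn.ConvTranspose2d(nz, ngf * 2, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True),
    nn.ConvTranspose2d(ngf * 2, ngf, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True),
    nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Sigmoid(),
)
z = torch.randn(1, nz, 24, 24)
with torch.no_grad():
    print(gen(z).shape)   # torch.Size([1, 1, 60, 60])
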
Example #7
def gen_ckpt(netname):
    model = ''

    sys.path.append("input")
    if 'vgg' in netname:
        from vggnet import vgg
        model = vgg(num_classes=1000)
    elif 'resnet18' in netname:
        from resnet import resnet18
        model = resnet18()
    elif 'resnet34' in netname:
        from resnet import resnet34
        model = resnet34()
    elif 'resnet50' in netname:
        from resnet import resnet50
        model = resnet50()
    elif 'simulator' in netname:
        from simulator import simulator
        model = simulator()

    import torch
    for m in model.modules():
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            m.register_buffer('scale', torch.tensor(1.0))
    torch.save({
        'state_dict': model.state_dict(),
        'hyper_parameters': {
            'conv.scale' : 0.0,
            'act_quant_bit' : 8,
            'weight_quant_bit' : 8,
         },
    }, f'input/{netname}.ckpt')

    sys.path.append("..")
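
A sketch of reading such a checkpoint back, assuming the same local resnet module is importable and the 'scale' buffers are re-registered before loading (names and paths mirror gen_ckpt above; the usage itself is an assumption):

import sys
import torch

sys.path.append("input")
from resnet import resnet34

model = resnet34()
for m in model.modules():
    # recreate the buffers gen_ckpt registered so the state dict keys match
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        m.register_buffer('scale', torch.tensor(1.0))

ckpt = torch.load('input/resnet34.ckpt')
model.load_state_dict(ckpt['state_dict'])
print(ckpt['hyper_parameters'])   # {'conv.scale': 0.0, 'act_quant_bit': 8, 'weight_quant_bit': 8}
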
def get_net(name):
    if name == 'densenet121':
        net = densenet121()
    elif name == 'densenet161':
        net = densenet161()
    elif name == 'densenet169':
        net = densenet169()
    elif name == 'googlenet':
        net = googlenet()
    elif name == 'inception_v3':
        net = inception_v3()
    elif name == 'mobilenet_v2':
        net = mobilenet_v2()
    elif name == 'resnet18':
        net = resnet18()
    elif name == 'resnet34':
        net = resnet34()
    elif name == 'resnet50':
        net = resnet50()
    elif name == 'resnet_orig':
        net = resnet_orig()
    elif name == 'vgg11_bn':
        net = vgg11_bn()
    elif name == 'vgg13_bn':
        net = vgg13_bn()
    elif name == 'vgg16_bn':
        net = vgg16_bn()
    elif name == 'vgg19_bn':
        net = vgg19_bn()
    else:
        print(f'{name} is not a valid model name')
        sys.exit(1)

    return net.to(device)
Example #9
def initialize_encoder(model_name, num_classes, use_pretrained=True):
    # Initialize the model variable; it is set in the model-specific branches below.
    model_ft = None

    if model_name == "resnet18":
        """ Resnet18
        """
        model_ft = resnet.resnet18(pretrained=use_pretrained, num_classes=1000)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "resnet34":
        """ Resnet34
        """
        model_ft = resnet.resnet34(pretrained=use_pretrained, num_classes=1000)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "resnet50":
        """ Resnet50
        """
        model_ft = resnet.resnet50(pretrained=use_pretrained, num_classes=1000)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft
Example #10
 def __init__(self):
     super(Resnet_Classifier, self).__init__()
     self.model_ft = resnet34(pretrained=True)
     # for param in self.model_ft.parameters():
     #     param.requires_grad = False
     num_ftrs = self.model_ft.fc.in_features
     # self.sigmoid = nn.Sigmoid()
     self.model_ft.fc = nn.Linear(num_ftrs, 2)
Example #11
    def init_net(self):

        net_args = {
            "pretrained": True,
            "n_input_channels": len(self.kwargs["static"]["imagery_bands"])
        }

        # https://pytorch.org/docs/stable/torchvision/models.html
        if self.kwargs["net"] == "resnet18":
            self.model = resnet.resnet18(**net_args)
        elif self.kwargs["net"] == "resnet34":
            self.model = resnet.resnet34(**net_args)
        elif self.kwargs["net"] == "resnet50":
            self.model = resnet.resnet50(**net_args)
        elif self.kwargs["net"] == "resnet101":
            self.model = resnet.resnet101(**net_args)
        elif self.kwargs["net"] == "resnet152":
            self.model = resnet.resnet152(**net_args)
        elif self.kwargs["net"] == "vgg11":
            self.model = vgg.vgg11(**net_args)
        elif self.kwargs["net"] == "vgg11_bn":
            self.model = vgg.vgg11_bn(**net_args)
        elif self.kwargs["net"] == "vgg13":
            self.model = vgg.vgg13(**net_args)
        elif self.kwargs["net"] == "vgg13_bn":
            self.model = vgg.vgg13_bn(**net_args)
        elif self.kwargs["net"] == "vgg16":
            self.model = vgg.vgg16(**net_args)
        elif self.kwargs["net"] == "vgg16_bn":
            self.model = vgg.vgg16_bn(**net_args)
        elif self.kwargs["net"] == "vgg19":
            self.model = vgg.vgg19(**net_args)
        elif self.kwargs["net"] == "vgg19_bn":
            self.model = vgg.vgg19_bn(**net_args)

        else:
            raise ValueError("Invalid network specified: {}".format(
                self.kwargs["net"]))

        #  run type: 1 = fine tune, 2 = fixed feature extractor
        #  - replace run type option with "# of layers to fine tune"
        if self.kwargs["run_type"] == 2:
            layer_count = len(list(self.model.parameters()))
            for layer, param in enumerate(self.model.parameters()):
                if layer <= layer_count - 5:
                    param.requires_grad = False

        # Parameters of newly constructed modules have requires_grad=True by default
        # get existing number for input features
        # set new number for output features to number of categories being classified
        # see: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
        if "resnet" in self.kwargs["net"]:
            num_ftrs = self.model.fc.in_features
            self.model.fc = nn.Linear(num_ftrs, self.ncats)
        elif "vgg" in self.kwargs["net"]:
            num_ftrs = self.model.classifier[6].in_features
            self.model.classifier[6] = nn.Linear(num_ftrs, self.ncats)
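
When run_type == 2 freezes most layers, only the parameters that still have requires_grad=True should be handed to the optimizer; a minimal sketch to use inside the same class after init_net() (the optimizer choice and learning rate here are assumptions, not taken from this code):

        trainable = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = torch.optim.SGD(trainable, lr=0.001, momentum=0.9)
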
Example #12
def make_network(model_name, class_num):
    if model_name == 'resnet':
        net = resnet.resnet34(True)
        net.fc = nn.Linear(512, class_num)
    elif model_name == 'mobilenet':
        net = mobilenet.mobilenet_v2(True)
        net.classifier[1] = nn.Linear(1280, class_num)
    else:
        net = raspnet.raspnet(name=model_name, class_num=class_num)
    return net
 def __init__(self,
              num_cls,
              channels=[512, 256, 128, 64],
              is_train=False,
              is_PixelShuffle=False,
              **kwargs):
     super(Network, self).__init__()
     self.encoder = resnet34(dilated=False, pretrained=is_train, **kwargs)
     self.decoder = Decoder(num_cls=num_cls,
                            channels=channels,
                            is_PixelShuffle=is_PixelShuffle)
    def __init__(self,
                 backbone='resnet18',
                 pretrained_base=True,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(ContextPath, self).__init__()
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        elif backbone == 'resnet34':
            pretrained = resnet34(pretrained=pretrained_base, **kwargs)
        elif backbone == 'resnet50':
            pretrained = resnet50(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        if backbone == 'resnet50':
            in_channels_1 = 2048
            in_channels_2 = 1024
        else:
            in_channels_1 = 512
            in_channels_2 = 256

        self.global_context = _GlobalAvgPooling(in_channels_1, inter_channels,
                                                norm_layer)

        self.arms = nn.ModuleList([
            AttentionRefinmentModule(in_channels_1, inter_channels, norm_layer,
                                     **kwargs),
            AttentionRefinmentModule(in_channels_2, inter_channels, norm_layer,
                                     **kwargs)
        ])
        self.refines = nn.ModuleList([
            _ConvBNReLU(inter_channels,
                        inter_channels,
                        3,
                        1,
                        1,
                        norm_layer=norm_layer),
            _ConvBNReLU(inter_channels,
                        inter_channels,
                        3,
                        1,
                        1,
                        norm_layer=norm_layer)
        ])
Example #15
    def __init__(self, classNum, pretrained=True):
        super(DeepMAR_res34, self).__init__()

        self.base = resnet.resnet34(pretrained=pretrained)
        self.num_att = classNum

        #print ((self.base))
        #exit()

        self.classifier = nn.Linear(512, self.num_att)
        init.normal_(self.classifier.weight, std=0.001)
        init.constant_(self.classifier.bias, 0)
Example #16
def build_model(model_name, pretrained=False):
    if model_name == 'resnet18':
        # resnet18 is handled below but was never constructed here; build it so `model` is always defined
        model = resnet.resnet18(pretrained=False)
    elif model_name == 'resnet34':
        model = resnet.resnet34(pretrained=False)
    elif model_name == 'resnet50':
        model = resnet.resnet50(pretrained=False)
    elif model_name == 'resnet101':
        model = resnet.resnet101(pretrained=False)
    elif model_name == 'resnet152':
        model = resnet.resnet152(pretrained=False)

    if model_name == 'resnet18':
        model.conv1 = nn.Conv2d(2,
                                64,
                                kernel_size=7,
                                stride=2,
                                padding=3,
                                bias=False)
        model.avg_pool = nn.AdaptiveAvgPool2d(1)
        model.last_linear = nn.Linear(512, 8)
    elif model_name == 'resnet34':
        model.conv1 = nn.Conv2d(2,
                                64,
                                kernel_size=7,
                                stride=2,
                                padding=3,
                                bias=False)
        model.avgpool = nn.AdaptiveAvgPool2d(1)
        model.fc = nn.Linear(512, 8)  # Nx8
    else:
        model.conv1 = nn.Conv2d(2,
                                64,
                                kernel_size=7,
                                stride=2,
                                padding=3,
                                bias=False)
        model.avgpool = nn.AdaptiveAvgPool2d(1)
        model.fc = nn.Linear(2048, 8)  # Nx8

    if pretrained:
        exclude_dict = ['conv1.weight', 'fc.weight', 'fc.bias']
        pretrained_dict = model_zoo.load_url(model_urls[model_name])
        model_dict = model.state_dict()

        pretrained_dict = {
            k: v
            for k, v in pretrained_dict.items() if k not in exclude_dict
        }

        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)

    return model
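
A usage sketch for the builder above; the two-channel conv1 is why conv1.weight sits in exclude_dict (the pretrained 3-channel tensor would not fit), and the 224x224 input size is just an example:

import torch

model = build_model('resnet34', pretrained=True)
x = torch.randn(4, 2, 224, 224)   # two-channel input, matching the replaced conv1
with torch.no_grad():
    out = model(x)                # shape (4, 8) from the new fc head
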
Example #17
    def __init__(self,
                 num_channels=3,
                 num_classes=1,
                 num_filters=32,
                 pretrained=False,
                 is_deconv=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained with resnet34
        :is_deconv:
            False: bilinear interpolation is used in decoder
            True: deconvolution is used in decoder
        """
        super().__init__()
        self.num_classes = num_classes

        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = resnet34(pretrained=pretrained,
                                num_channels=num_channels)

        self.relu = nn.ReLU(inplace=True)

        self.conv1 = nn.Sequential(self.encoder.conv1, self.encoder.bn1,
                                   self.encoder.relu, self.pool)

        self.conv2 = self.encoder.layer1

        self.conv3 = self.encoder.layer2

        self.conv4 = self.encoder.layer3

        self.conv5 = self.encoder.layer4

        self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8,
                                     is_deconv)

        self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2,
                                   num_filters * 8, is_deconv)
        self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2,
                                   num_filters * 8, is_deconv)
        self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2,
                                   num_filters * 2, is_deconv)
        self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2,
                                   num_filters * 2 * 2, is_deconv)
        self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2,
                                   num_filters, is_deconv)
        self.dec0 = ConvRelu(num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
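
A sketch of the forward pass these channel counts imply (the familiar ResNet-34 U-Net wiring with skip connections); the exact method is an assumption, but the concatenated channel sizes line up with the DecoderBlockV2 arguments above:

    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)

        center = self.center(self.pool(conv5))

        dec5 = self.dec5(torch.cat([center, conv5], 1))   # 256 + 512 channels
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))     # 256 + 256
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))     # 256 + 128
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))     # 64 + 64
        dec1 = self.dec1(dec2)
        dec0 = self.dec0(dec1)
        return self.final(dec0)
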
Example #18
    def __init__(self, num_classes=1000):
        super(Resnet34_8s, self).__init__()
        # Load the pretrained weights, remove avg pool
        # layer and get the output stride of 8
        resnet34_8s = resnet34(fully_conv=True,
                               pretrained=True,
                               output_stride=8,
                               remove_avg_pool_layer=True)

        # Randomly initialize the 1x1 Conv scoring layer
        resnet34_8s.fc = nn.Conv2d(resnet34_8s.inplanes, num_classes, 1)
        self.resnet34_8s = resnet34_8s
        self._normal_initialization(self.resnet34_8s.fc)
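
A minimal sketch of the helper and forward pass implied by this setup (Gaussian init for the 1x1 scoring conv, then bilinear upsampling of the stride-8 logits back to the input size); an illustration under those assumptions, not necessarily the original code:

    def _normal_initialization(self, layer):
        layer.weight.data.normal_(0, 0.01)
        layer.bias.data.zero_()

    def forward(self, x):
        input_spatial_dim = x.size()[2:]
        x = self.resnet34_8s(x)
        return nn.functional.interpolate(x, size=input_spatial_dim,
                                         mode='bilinear', align_corners=False)
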
Example #19
 def _load_pretrained_weight(self, channels=4):
     ## Trick: build 4-channel conv1 weights from the pretrained 3-channel ResNet weights
     _net = torchvision.models.resnet34(pretrained=True)
     state_dict = _net.state_dict().copy()
     layer0_weights = state_dict['conv1.weight']
     print('raw_weight size: ', layer0_weights.size())
     layer0_weights_new = torch.nn.Parameter(torch.cat((layer0_weights, layer0_weights[:,:1,:,:]),dim=1))
     print('new_weight size: ', layer0_weights_new.size())
     new_state_dict = OrderedDict(('conv1.weight', layer0_weights_new) if key == 'conv1.weight' \
                                  else (key, value) for key, value in state_dict.items())
     ## 
     net = resnet34(pretrained=False)
     net.load_state_dict(new_state_dict)
     return net
    def __init__(self, tiles_path, sparsity_ratio, simulation_file):
        super().__init__()
        rn34 = resnet34(tiles_path,
                        sparsity_ratio,
                        simulation_file,
                        pretrained=True)

        # discard the last ResNet block, avg pooling and the classification FC
        self.layer1 = nn.Sequential(*list(rn34.children())[:6])
        self.layer2 = nn.Sequential(*list(rn34.children())[6:7])
        # modify conv4 if necessary
        # Always deal with stride in first block
        modulelist = list(self.layer2.children())
        _ModifyBlock(modulelist[0], stride=(1, 1))
Example #21
    def __init__(self):
        super(ShapeRecognizer, self).__init__()

        self.resnet = resnet34(True)

        # square modules
        self.square_attn = AttentionModule(dim=256)
        self.query = QueryModule(dim=256)
        self.classifier = Classifier(dim=256)
        # circle modules
        self.circle_attn = AttentionModule(dim=256)

        # triangle modules
        self.triangle_attn = AttentionModule(dim=256)
 def __init__(self, num_classes=1000):
     super(Resnet34_8s, self).__init__()
     # Load the pretrained weights, remove avg pool
     # layer and get the output stride of 8
     resnet34_8s = resnet34(fully_conv=True,
                            pretrained=True,
                            output_stride=8,
                            remove_avg_pool_layer=True)
     # Randomly initialize the 1x1 Conv scoring layer
     self.resnet34_8s = nn.Sequential(*list(resnet34_8s.children())[:-2])
     self.avg_pool_1 = nn.AvgPool2d(kernel_size=7, stride=1, padding=3)
     self.fc = nn.Conv2d(resnet34_8s.inplanes, num_classes, 1)
     self.avg_pool_2 = nn.AdaptiveAvgPool2d(output_size=(1, 1))
     self.linear = nn.Linear(in_features=512, out_features=3, bias=True)
     self._normal_initialization(self.fc)
Example #23
    def load_model(self):
        # Load the model saved as `self.trained_model_prefix + "latest_model.pth"` and already trained by train_model.py
        nb_outputs = ut.nbOutputs(self.label_style, self.environment)

        if self.model_type == 'dk_resnet18_CP':
            nb_outputs = 2  # FIXME: hard coded
            reward_fn_head = RewardFunctionHeadCartPole()
            net = RewardFunctionHeadModel(
                models.resnet18(pretrained=False, num_classes=nb_outputs),
                reward_fn_head)
        elif self.model_type == 'dk_resnet18_CP_weird':
            nb_outputs = 2  # FIXME: hard coded
            reward_fn_head = WeirdRewardFunctionHeadCartPole()
            net = RewardFunctionHeadModel(
                models.resnet18(pretrained=False, num_classes=nb_outputs),
                reward_fn_head)
        elif self.model_type == 'dk_resnet18_DT':
            nb_outputs = 2  # FIXME: hard coded
            reward_fn_head = RewardFunctionHeadDuckieTown()
            net = RewardFunctionHeadModel(
                models.resnet18(pretrained=False, num_classes=nb_outputs),
                reward_fn_head)
        elif self.model_type == 'resnet18':
            net = models.resnet18(pretrained=False, num_classes=nb_outputs)
            #### To use a pretrained model instead, drop num_classes (pretrained weights ship with the original 1000 classes) and replace the head:
            # dim_feats = net.fc.in_features # =1000
            # net.fc = nn.Linear(dim_feats, nb_outputs)

        elif self.model_type == 'resnet34':
            net = resnet.resnet34(pretrained=False, num_classes=nb_outputs)
        elif self.model_type == 'resnet50':
            net = resnet.ResNet(resnet.Bottleneck, [3, 4, 6, 3],
                                num_classes=nb_outputs)
        elif self.model_type == 'resnet101':
            net = resnet.ResNet(resnet.Bottleneck, [3, 4, 23, 3],
                                num_classes=nb_outputs)
        elif self.model_type == 'resnet152':
            net = resnet.ResNet(resnet.Bottleneck, [3, 8, 36, 3],
                                num_classes=nb_outputs)

        net.load_state_dict(torch.load(self.model_path))
        print('Loaded model')
        net.eval()
        net = net.to(self.device)
        return net
Example #24
    def __init__(self, use_nhwc=False, pad_input=False):
        super().__init__()

        if use_nhwc:
            rn34 = resnet34_nhwc(pretrained=True, pad_input=pad_input)
            idx = 5
        else:
            rn34 = resnet34(pretrained=True)
            idx = 6

        # discard the last ResNet block, avg pooling and the classification FC
        self.layer1 = nn.Sequential(*list(rn34.children())[:idx])
        self.layer2 = nn.Sequential(*list(rn34.children())[idx:idx + 1])
        # modify conv4 if necessary
        padding = None
        # Always deal with stride in first block
        modulelist = list(self.layer2.children())
        _ModifyBlock(modulelist[0], stride=(1, 1))
def load_pre():
    # model = torchvision.models.resnet34(pretrained=False)
    model = resnet34(num_classes=2, include_top=True)
    model_weight_path = "save_model/resnet34_pre.pth"

    # Remove the final fully connected layer from the pretrained ResNet state dict
    pre_weight_dict = torch.load(model_weight_path)
    for weight_name in list(pre_weight_dict.keys()):
        if weight_name == "fc.weight":
            pre_weight_dict.pop(weight_name)
        if weight_name == "fc.bias":
            pre_weight_dict.pop(weight_name)

    missing_keys, unexpected_keys = model.load_state_dict(pre_weight_dict, strict=False)
    # print(model.fc.in_features)
    # fc_inchannel = model.fc.in_features
    # model.fc = nn.Linear(fc_inchannel, 2)
    return model
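
A usage sketch, assuming the checkpoint file exists at the path above; with strict=False only the popped fc.weight / fc.bias remain randomly initialized, so the model ends with a 2-class head:

import torch

model = load_pre()
x = torch.randn(1, 3, 224, 224)   # example RGB input size (an assumption)
with torch.no_grad():
    print(model(x).shape)         # torch.Size([1, 2])
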
Example #26
def get_network(args, use_gpu=True):
    if args.net == 'resnet18':
        from resnet import resnet18
        net = resnet18(args)
    elif args.net == 'resnet34':
        from resnet import resnet34
        net = resnet34(args)
    elif args.net == 'resnet50':
        from resnet import resnet50
        net = resnet50(args)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if use_gpu:
        net = net.cuda()

    return net
Example #27
    def __init__(self, n_channels, n_classes, activation=None, dr=0):
        super(BASNet, self).__init__()

        self.activation = activation
        ## -------------Encoder--------------
        self.resnet = resnet34(pretrained=True)
        ## -------------Bridge--------------

        #stage Bridge
        self.convbg_2 = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bnbg_2 = nn.BatchNorm2d(512)
        self.relubg_2 = nn.ReLU(inplace=True)

        ## -------------Decoder--------------

        self.decoder = UnetDecoder(encoder_channels=(512, 256, 128, 64, 64),
                                   dropout=dr)

        ## -------------Bilinear Upsampling--------------
        self.upscore6 = nn.Upsample(scale_factor=32,
                                    mode='bilinear',
                                    align_corners=True)  ###
        self.upscore5 = nn.Upsample(scale_factor=16,
                                    mode='bilinear',
                                    align_corners=True)
        self.upscore4 = nn.Upsample(scale_factor=8,
                                    mode='bilinear',
                                    align_corners=True)
        self.upscore3 = nn.Upsample(scale_factor=4,
                                    mode='bilinear',
                                    align_corners=True)
        self.upscore2 = nn.Upsample(scale_factor=2,
                                    mode='bilinear',
                                    align_corners=True)

        ## -------------Side Output--------------
        self.outconv6 = nn.Conv2d(512, 1, 3, padding=1)
        self.outconv5 = nn.Conv2d(256, 1, 3, padding=1)
        self.outconv4 = nn.Conv2d(128, 1, 3, padding=1)
        self.outconv3 = nn.Conv2d(64, 1, 3, padding=1)
        self.outconv2 = nn.Conv2d(32, 1, 3, padding=1)

        ## -------------Refine Module-------------
        self.refunet = RefUnet(1, 64)
Example #28
    def __init__(self,
                 args,
                 label_num,
                 use_nhwc=False,
                 pad_input=False,
                 bn_group=1,
                 pretrained=True):

        super(SSD300, self).__init__()

        self.label_num = label_num
        self.use_nhwc = use_nhwc
        self.pad_input = pad_input
        self.bn_group = bn_group

        # The backbone is always ResNet-34 (RN34)
        out_channels = 256
        out_size = 38
        self.out_chan = [out_channels, 512, 512, 256, 256, 256]

        # self.model = ResNet(self.use_nhwc, self.pad_input, self.bn_group)

        rn_args = {
            'bn_group': bn_group,
            'pad_input': pad_input,
            'nhwc': use_nhwc,
            'pretrained': pretrained,
            'ssd_mods': True,
        }

        self.model = resnet34(**rn_args)

        self._build_additional_features()

        padding_channels_to = 8
        self._build_multibox_heads(use_nhwc, padding_channels_to)

        # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2
        # classifiers 1, 2, 3, 4, 5, 6

        # initialize all weights
        with torch.no_grad():
            self._init_weights()
def set_model():
    if Config.backbone == 'resnet18':
        model = resnet.resnet18(num_class=Config.out_class)
    elif Config.backbone == 'resnet34':
        model = resnet.resnet34(num_class=Config.out_class, pretrained=Config.pretrain)
    elif Config.backbone == 'resnet50':
        model = resnet.resnet50(num_class=Config.out_class, pretrained=Config.pretrain)
    elif Config.backbone == 'ncrf18':
        model = ncrf.resnet18(num_class=Config.out_class)
    elif Config.backbone == 'ncrf34':
        model = ncrf.resnet34(num_class=Config.out_class)
    elif Config.backbone == 'ncrf50':
        model = ncrf.resnet50(num_class=Config.out_class)
    elif Config.backbone == 'densenet121':
        model = densenet.densenet121(Config.out_class, pretrained=Config.pretrain, drop_rate=Config.drop_rate)
    elif Config.backbone == 'msdn18':
        model = hardcore_msdn.msdn18(Config.out_class, ss=Config.ss, drop_rate=Config.drop_rate)
    elif Config.backbone == 'alexnet':
        model = alexnet.alexnet(2)
    else:
        raise ValueError(f'Unknown backbone: {Config.backbone}')
    return model
Example #30
    def __init__(self, backbone='resnet', layers=50, bins=(1, 2, 3, 6),
                 dropout=0.1, classes=2, zoom_factor=8, use_ppm=True,
                 use_softmax=True, pretrained=True, syncbn=True,
                 group_size=8, group=None):
        super(PSPNet, self).__init__()
        assert layers in [18, 34, 50, 101, 152]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.use_softmax = use_softmax

        if backbone == 'resnet':
            import resnet as models
        else:
            raise NameError('Backbone type not defined!')

        if syncbn:
            # from lib.syncbn import SynchronizedBatchNorm2d as BatchNorm
            def BNFunc(*args, **kwargs):
                return SyncBatchNorm2d(*args, **kwargs, group_size=group_size, group=group, sync_stats=True)
            BatchNorm = BNFunc
        else:
            from torch.nn import BatchNorm2d as BatchNorm
        models.BatchNorm = BatchNorm

        if layers == 34:
            resnet = models.resnet34(pretrained=pretrained)
        elif layers == 18:
            resnet = models.resnet18(pretrained=pretrained)
        elif layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
            
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3 = resnet.layer1, resnet.layer2, resnet.layer3
        self.layer4_ICR, self.layer4_PFR, self.layer4_PRP = resnet.layer4_ICR, resnet.layer4_PFR, resnet.layer4_PRP
        self.avgpool = nn.AvgPool2d(7, stride=1)