Example #1
    def __init__(self, model_name='resnet34', num_classes=43):
        super().__init__()
        if model_name.lower() == 'resnet34':
            self.backbone = ptcv_get_model("resnet34", pretrained=True)

            self.backbone.features.final_pool = nn.AdaptiveAvgPool2d(1)
            self.backbone.output = nn.Linear(512, num_classes)
            # self.backbone.output = nn.Sequential(nn.Linear(512, 128),
            #                                      swish(),
            #                                      nn.Dropout(p=0.5),
            #                                      nn.Linear(128, num_classes))
        elif model_name.lower() == 'efficientnet_b7':
            self.backbone = EfficientNet.from_pretrained('efficientnet-b7')
            # self.backbone._fc = nn.Linear(2560, num_classes)

            self.backbone._fc = nn.Sequential(nn.Linear(2560, 256),
                                              Mish(),
                                              nn.Dropout(p=0.5),
                                              nn.Linear(256, num_classes))
        elif model_name.lower() == 'se_resnext101':
            self.backbone = ptcv_get_model("seresnext101_32x4d", pretrained=True)

            self.backbone.features.final_pool = nn.AdaptiveAvgPool2d(1)
            self.backbone.output = nn.Sequential(nn.Linear(2048, 256),
                                                 Mish(),
                                                 nn.Dropout(p=0.5),
                                                 nn.Linear(256, num_classes))
        else:
            raise NotImplementedError
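The constructor above only swaps the backbone's pooling and classification head; the enclosing class itself is not shown. The following self-contained sketch (an illustration, not the original author's code) reproduces the same head-replacement pattern for the pytorchcv resnet34 branch and checks the output shape; pretrained=False is used only to avoid a weight download.

import torch
import torch.nn as nn
from pytorchcv.model_provider import get_model as ptcv_get_model

backbone = ptcv_get_model("resnet34", pretrained=False)
backbone.features.final_pool = nn.AdaptiveAvgPool2d(1)  # pool to 1x1 regardless of input size
backbone.output = nn.Linear(512, 43)                     # 43 classes, as in the snippet above
with torch.no_grad():
    logits = backbone(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 43])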
Example #2
    def __init__(self, net='seresnext50', pretrained=False, num_classes=1, dropout_flag=False, size=512):
        super(MySeResnext, self).__init__()
        self.dropout_flag = dropout_flag
        self.size = size

        if net == 'seresnext50':
            seresnext50 = ptcv_get_model("seresnext50_32x4d", pretrained=pretrained)
            if self.size == 224:
                self.model = nn.Sequential(*(list(seresnext50.children())[0]))
            else:
                self.model = nn.Sequential(*(list(seresnext50.children())[0][:-1]))
        elif net == 'seresnext101':
            seresnext101 = ptcv_get_model("seresnext101_32x4d", pretrained=pretrained)
            if self.size == 224:
                self.model = nn.Sequential(*(list(seresnext101.children())[0]))
            else:
                self.model = nn.Sequential(*(list(seresnext101.children())[0][:-1]))
        else:
            raise ValueError("Unsupported net name: {}".format(net))

        if self.size != 224:
            self.avgpool = nn.AvgPool2d(int(size / 32), stride=1)

        if self.dropout_flag:
            self.dropout = nn.Dropout(0.5)

        self.last_fc = nn.Linear(2048, num_classes)
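The class above only declares its modules; its forward pass is not part of the snippet. A hedged sketch of how the declared pieces are typically wired together (an assumption about the missing method, not the original implementation):

    def forward(self, x):
        x = self.model(x)               # backbone features (includes the final pool when size == 224)
        if self.size != 224:
            x = self.avgpool(x)         # extra pooling for non-224 input sizes
        x = x.view(x.size(0), -1)       # flatten to (N, 2048)
        if self.dropout_flag:
            x = self.dropout(x)
        return self.last_fc(x)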
Example #3
def init_network(params):

    # parse params with default values
    architecture = params.get('architecture', 'seresnet50')
    combination = params.get('combination', 'MG')
    pretrained = params.get('pretrained', True)
    classes = params.get('classes', 17331)
    output_dim = params.get('output_dim', 1536)

    # loading backbone network from pytorchcv
    # Github : https://github.com/osmr/imgclsmob/tree/master/pytorch
    net_in = ptcv_get_model(architecture, pretrained=pretrained)

    features = list(net_in.children())[:-1]
    combination = COMBINATION[combination]

    meta = {
        'architecture': architecture,
        'outputdim': output_dim,
        'classes': classes
    }

    net = CGD(features, combination, output_dim, classes, meta)

    return net
Example #4
def test(model_name, dataset_name, epsilon, temperature, out_dir: str):
    print('Load model: {}'.format(model_name))
    model = None
    if 'densenet' in model_name:
        trainset_name = 'cifar' + model_name.split('densenet')[-1]
        model = ptcv_get_model("densenet100_k12_bc_%s" % trainset_name,
                               pretrained=True)
    elif 'resnet' in model_name:
        trainset_name = 'cifar' + model_name.split('resnet')[-1]
        model = ptcv_get_model("wrn28_10_%s" % trainset_name, pretrained=True)
    assert isinstance(model, torch.nn.Module)
    model.cuda()

    print('Create Dataloaders: {}'.format(dataset_name))
    testloader = None
    # if dataset_name != "Uniform" and dataset_name != "Gaussian":
    #     tests_ood = ImageFolder(osp.join('..', 'data', dataset_name), transform=transform)
    #     testloader = data.DataLoader(tests_ood, batch_size=1, shuffle=False, num_workers=4)
    if dataset_name == 'cifar10':
        testset = CIFAR10(root=osp.join('..', 'data'),
                          train=False,
                          download=True,
                          transform=transform)
        testloader = data.DataLoader(testset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=4)
    elif dataset_name == 'cifar100':
        testset = CIFAR100(root=osp.join('..', 'data'),
                           train=False,
                           download=True,
                           transform=transform)
        testloader = data.DataLoader(testset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=4)
    elif dataset_name in ['Uniform', 'Gaussian']:
        testset = CIFAR10(root=osp.join('..', 'data'),
                          train=False,
                          download=True,
                          transform=transform)
        testloader = data.DataLoader(testset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=4)
    elif dataset_name == 'SVHN':
        testset = SVHN(root=osp.join('..', 'data'),
                       split='test',
                       download=True,
                       transform=transform)
        testloader = data.DataLoader(testset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=4)

    print('Produce score: {}'.format(dataset_name))
    produce_score(model, testloader, model_name, dataset_name, epsilon,
                  temperature, out_dir)
Example #5
def create_fmodel(dataset="tiny_imagenet",model_name="resnet18",gpu=None):

    if dataset == "imagenet":

        model = ptcv_get_model(model_name, pretrained=True)
        model.eval()
        if gpu is not None:
            model = model.cuda()
        #
        # def preprocessing(x):
        #     mean = np.array([0.485, 0.456, 0.406])
        #     std = np.array([0.229, 0.224, 0.225])
        #     _mean = mean.astype(x.dtype)
        #     _std = std.astype(x.dtype)
        #     x = x - _mean
        #     x /= _std
        #
        #     assert x.ndim in [3, 4]
        #     if x.ndim == 3:
        #         x = np.transpose(x, axes=(2, 0, 1))
        #     elif x.ndim == 4:
        #         x = np.transpose(x, axes=(0, 3, 1, 2))
        #
        #     def grad(dmdp):
        #         assert dmdp.ndim == 3
        #         dmdx = np.transpose(dmdp, axes=(1, 2, 0))
        #         return dmdx / _std
        #     return x, grad

        preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)

        fmodel = PyTorchModel(model, bounds=(0, 1), num_classes=1000, preprocessing=preprocessing)

    elif dataset == "cifa10":

        model = ptcv_get_model(model_name, pretrained=True)
        model.eval()
        if gpu is not None:
            model = model.cuda()

        preprocessing = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], axis=-3)

        fmodel = PyTorchModel(model, bounds=(0, 1), num_classes=10, preprocessing=preprocessing)

    elif dataset == "dev":

        model = ptcv_get_model(model_name, pretrained=True)
        model.eval()
        if gpu is not None:
            model = model.cuda()

        preprocessing = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], axis=-3)

        fmodel = PyTorchModel(model, bounds=(0, 1), num_classes=1000, preprocessing=preprocessing)

    return fmodel
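A hedged usage sketch for the factory above, assuming foolbox 2.x (which provides PyTorchModel and the FGSM attack); the image and label placeholders are not taken from the source:

import foolbox

fmodel = create_fmodel(dataset="imagenet", model_name="resnet18", gpu=None)
attack = foolbox.attacks.FGSM(fmodel)
# image: float32 array of shape (3, H, W) scaled to [0, 1]; label: integer class id
# adversarial = attack(image, label)  # left commented out because it needs real input data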
Example #6
def get_menet_456(pretrained=False):
    net = ptcv_get_model("menet456_24x1_g3", pretrained=False)
    if pretrained:
        net = ptcv_get_model("menet456_24x1_g3", pretrained=True)
    num_ftrs = net.output.in_features
    out_ftrs = int(net.output.out_features / 4)
    net.output = nn.Sequential(
        nn.Sigmoid(),
        nn.Dropout(0.5),
        nn.Linear(num_ftrs, out_ftrs, bias=True),
        nn.SELU(),
        nn.Dropout(0.7),
        nn.Linear(in_features=out_ftrs, out_features=1, bias=True),
    )
    return net
Example #7
def get_model(model_name,
              load_model='None',
              batch_size=32,
              device='cuda:0',
              pretrained=False):
    if model_name == 'shufflenetv2x0.5':
        model = ptcv_get_model('shufflenetv2_wd2', pretrained=pretrained)
        num_ftrs = model.output.in_features
        model.output = nn.Linear(num_ftrs, 2)
    else:
        raise NotImplementedError

    print('testing model')
    x = torch.randn(16, 3, 192, 192)
    y = model(x)
    print(y.size())
    print('model all set')

    # model must be converted to device and DataParallel so that checkpoint can be loaded
    model.to(device)
    model = nn.DataParallel(model)

    if load_model != 'None':
        checkpoint = torch.load(load_model)
        model.load_state_dict(checkpoint)

    return model
Example #8
def densenet(dataset: str, depth: int, pretrained=False) -> Module:
    """
    Wrapper for densenet available in the pytorchcv package.

    :param dataset: One of cifar10, cifar100, svhn, imagenet.
    :param depth: The depth of the densenet. For imagenet, depths
                  (121, 161, 169, 201) are supported; the other datasets
                  support depths (40, 100).
    :param pretrained: load a model pretrained on `dataset`.
    """
    if dataset in ["cifar10", "cifar100", "svhn"]:
        available_depths = [40, 100]
        # other growth rates are available through the general method.
        growth_rate = 12
        model_name = f"densenet{depth}_k{growth_rate}_{dataset}"
    elif dataset == "imagenet":
        available_depths = [121, 161, 169, 201]
        model_name = f"densenet{depth}"
    else:
        raise ValueError(f"Unrecognized dataset {dataset}")

    if depth not in available_depths:
        raise ValueError(f"Depth {depth} not available for dataset {dataset}, "
                         f"availble depths are {available_depths}")

    model = ptcv_get_model(model_name, pretrained=pretrained)
    return model
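A short usage sketch for this wrapper (pretrained=False avoids a weight download; the model name resolves to densenet40_k12_cifar10 as constructed above):

import torch

model = densenet("cifar10", depth=40, pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 32, 32))  # CIFAR-sized input
print(logits.shape)  # expected: torch.Size([1, 10])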
Example #9
def efficientnet_b1b(in_channels, num_classes, pretrained=True):
    model = ptcv_get_model("efficientnet_b1b", pretrained=True)
    print(model)
    model.output = nn.Sequential(
        nn.Dropout(p=0.3, inplace=False), nn.Linear(1280, 7, bias=True)
    )
    return model
Example #10
def pyramidnet(dataset: str, depth: int, pretrained=False) -> Module:
    """
    Wrapper for pyramidnet available in the pytorchcv package.

    :param dataset: One of cifar10, cifar100, svhn, imagenet.
    :param depth: The depth of the pyramidnet. For imagenet, only 101 is supported;
                  the other datasets support depths (110, 164, 200, 236, 272).
    :param pretrained: load a model pretrained on `dataset`.
    """
    if dataset in ["cifar10", "cifar100", "svhn"]:
        available_depths = [110, 164, 200, 236, 272]
        alpha = {110: 48, 164: 270, 200: 240, 236: 220, 272: 200}.get(depth)
        if depth < 200:
            model_name = f"pyramidnet{depth}_a{alpha}_{dataset}"
        else:
            # These models have batch normalization
            model_name = f"pyramidnet{depth}_a{alpha}_bn_{dataset}"
    elif dataset == "imagenet":
        available_depths = [101]
        alpha = 360
        model_name = f"pyramidnet{depth}_a{alpha}"
    else:
        raise ValueError(f"Unrecognized dataset {dataset}")

    if depth not in available_depths:
        raise ValueError(f"Depth {depth} not available for dataset {dataset}, "
                         f"availble depths are {available_depths}")

    model = ptcv_get_model(model_name, pretrained=pretrained)
    return model
Example #11
    def __init__(self, model_name, ds_train, ds_val, ds_test, batch_size=64):

        super(MelanomaModel, self).__init__()

        self.ds_train = ds_train
        self.ds_val = ds_val
        self.ds_test = ds_test
        self.batch_size = batch_size
        self.val_logloss = dict()
        self.val_auc = dict()
        self.oof_preds = dict()

        if "efficient" in model_name:
            self.net = ptcv_get_model(model_name, pretrained=True)
            self.n_map_features = self.net.output.fc.in_features
            self.fc = nn.Linear(in_features=self.net.output.fc.in_features,
                                out_features=1)
            self.net.output.fc = nn.Identity()
        elif "resne" in model_name:
            self.net = models.resnet18(pretrained=True)
            self.n_map_features = list(self.net.children())[-1].in_features
            self.fc = nn.Linear(in_features=self.net.fc.in_features,
                                out_features=1)
            self.net.fc = nn.Identity()
        else:
            raise Exception("未実装")
Example #12
    def _set_model(self):
        if self.settings.dataset in ["cifar100"]:
            self.test_input = Variable(torch.randn(1, 3, 32, 32).cuda())
            self.model = ptcv_get_model('resnet20_cifar100', pretrained=True)
            self.model_teacher = ptcv_get_model('resnet20_cifar100',
                                                pretrained=True)
            self.model_teacher.eval()

        elif self.settings.dataset in ["imagenet"]:
            self.test_input = Variable(torch.randn(1, 3, 224, 224).cuda())
            self.model = ptcv_get_model('resnet18', pretrained=True)
            self.model_teacher = ptcv_get_model('resnet18', pretrained=True)
            self.model_teacher.eval()

        else:
            assert False, "unsupported dataset: " + self.settings.dataset
Example #13
    def __init__(self,
                 mult=1.0,
                 feature_levels=(3, 4, 5),
                 pretrained=True,
                 include_final=False,
                 **kwargs):
        super().__init__()
        _check_levels(feature_levels)
        self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
        self.feature_levels = feature_levels
        net = ptcv_get_model(self.mult2name[mult], pretrained=pretrained)
        del net.output
        net = net.features
        self.layer1 = net.init_block.conv
        self.layer2 = net.init_block.pool
        self.layer3 = net.stage1
        self.layer4 = net.stage2
        if include_final:
            self.layer5 = nn.Sequential(
                net.stage3,
                net.final_block,
            )
        else:
            self.layer5 = net.stage3
        out_channels = [
            get_out_channels(self.layer1),
            get_out_channels(self.layer1),
            calc_out_channels(self.layer3),
            calc_out_channels(self.layer4),
            calc_out_channels(self.layer5),
        ]

        self.out_channels = [out_channels[i - 1] for i in feature_levels]
Example #14
 def __init__(self, name, feature_levels=(3, 4, 5), pretrained=True):
     super().__init__()
     _check_levels(feature_levels)
     self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
     self.feature_levels = feature_levels
     net = ptcv_get_model(name, pretrained=pretrained)
     del net.output
     net = net.features
     self.layer1 = net.init_block.conv
     self.layer2 = nn.Sequential(
         net.init_block.pool,
         net.stage1,
     )
     self.layer3 = net.stage2
     self.layer4 = net.stage3
     if hasattr(net, "post_activ"):
         self.layer5 = nn.Sequential(
             net.stage4,
             net.post_activ,
         )
     else:
         self.layer5 = net.stage4
     self.out_channels = [
         calc_out_channels(getattr(self, ("layer%d" % i)))
         for i in feature_levels
     ]
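Neither this wrapper nor the one in Example #13 shows its forward pass. The usual pattern for such layer1..layer5 splitters is to run the stages in order and collect the outputs at the requested levels; a hedged sketch of that assumed wiring:

    def forward(self, x):
        outputs = []
        for level in self.forward_levels:            # e.g. (1, 2, 3, 4, 5)
            x = getattr(self, "layer%d" % level)(x)
            if level in self.feature_levels:         # e.g. (3, 4, 5)
                outputs.append(x)
        return outputs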
Example #15
def main():
    # prepare input data
    trf = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    inputs = trf(Image.open('./data/dog.jpg')).unsqueeze(0)

    # load existing model from pytorchcv lib (pretrain=False)
    net = ptcv_get_model('mobilenetv2_w1', pretrained=False)
    net.eval()
    output  = net(inputs)
    # print("original model:",net)

    # fuse the Batch Normalization
    net1 = fuse_bn_recursively(copy.deepcopy(net))
    net1.eval()
    output1 = net1(inputs)
    # print("BN fusion  model:",net1)

    # compare the output
    print("output of original model:",output.size())
    print("=> min : %.4f, mean : %.4f max : %.4f"%(output.min().detach().numpy(),output.mean().detach().numpy(),output.max().detach().numpy()))
    print("output of BNfusion model:",output1.size())
    print("=> min : %.4f, mean : %.4f max : %.4f"%(output1.min().detach().numpy(),output1.mean().detach().numpy(),output1.max().detach().numpy()))

    # transform to ONNX format for  visualization
    dummy_input = Variable(torch.randn([1, 3, 224, 224]))
    torch.onnx.export(net, dummy_input, "./data/mobilenet_v2.onnx")
    torch.onnx.export(net1, dummy_input, "./data/mobilenet_v2_nobn.onnx")
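For reference, a minimal sketch of the arithmetic that BN fusion performs for a single Conv2d followed by BatchNorm2d; it only illustrates the idea (plain convolutions, no groups or dilation handling) and is not the fuse_bn_recursively implementation used above:

import torch
import torch.nn as nn

def fold_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    # y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta is folded into a single conv
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding, bias=True)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.data = conv.weight.data * scale.view(-1, 1, 1, 1)
    bias = conv.bias.data if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.data = (bias - bn.running_mean) * scale + bn.bias.data
    return fused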
Example #16
def resnet(dataset: str, depth: int, pretrained=False) -> Module:
    """
    Wrapper for the (basic) resnet available in the pytorchcv package. More variants
    are available through the general wrapper.

    :param dataset: One of cifar10, cifar100, svhn, imagenet.
    :param depth: depth of the architecture, one of (10, 12, 14, 16, 18, 26, 34,
                  50, 101, 152, 200) for imagenet,
                  (20, 56, 110, 1001, 1202) for the other datasets.
    :param pretrained: loads model pretrained on `dataset`.
    """

    if dataset in ["cifar10", "cifar100", "svhn"]:
        available_depths = [20, 56, 110, 1001, 1202]
        model_name = f"resnet{depth}_{dataset}"
    elif dataset == "imagenet":
        available_depths = [10, 12, 14, 16, 18, 26, 34, 50, 101, 152, 200]
        model_name = f"resnet{depth}"
    else:
        raise ValueError(f"Unrecognized dataset {dataset}")

    if depth not in available_depths:
        raise ValueError(f"Depth {depth} not available for dataset {dataset}, "
                         f"availble depths are {available_depths}")

    model = ptcv_get_model(model_name, pretrained=pretrained)
    return model
Example #17
 def __init__(self,
              width_mult=2.0,
              feature_levels=(3, 4, 5),
              pretrained=True,
              include_final=False):
     super().__init__()
     _check_levels(feature_levels)
     self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
     self.feature_levels = feature_levels
     self.include_final = include_final
     name = self.mult2name[float(width_mult)]
     net = ptcv_get_model(name, pretrained=pretrained)
     del net.output
     net = net.features
     self.layer1 = net.init_block
     self.layer2 = net.stage1
     self.layer3 = net.stage2
     self.layer4 = net.stage3
     if include_final:
         self.layer51 = net.stage4
         self.layer52 = net.final_block
     else:
         self.layer5 = net.stage4
     out_channels = [
         net.stage1[-1].activ.num_parameters,
         net.stage2[-1].activ.num_parameters,
         net.stage3[-1].activ.num_parameters,
         net.final_block.conv2.conv.out_channels
         if include_final else net.stage4[-1].activ.num_parameters,
     ]
     self.out_channels = [out_channels[i - 2] for i in feature_levels]
Example #18
    def __init__(self, classCount):
        super(Xception_osmr, self).__init__()

        self.model_ft = ptcv_get_model("xception", pretrained=True)
        num_ftrs = self.model_ft.output.in_features
        self.model_ft.features.final_block.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.model_ft.output = nn.Sequential(
            nn.Linear(num_ftrs, classCount, bias=True), nn.Sigmoid())
Example #19
def efficientnet_b1(pretrained=True):
  """
  Returns efficientnet-b1 feature extraction layers, using pytorchcv's
  ptcv_get_model helper method.
  Args:
    pretrained --> type:bool (default: True)
  """
  return ptcv_get_model("efficientnet_b1", pretrained=pretrained).features
Example #20
    def init_weights(self, num_layers):
        # pretrained model for ShuffleNetV2 BaseNodes
        # url = model_urls['shufflenetv2_x1.0']
        # pretrained_state_dict = model_zoo.load_url(url)
        # print('=> loading pretrained model {}'.format(url))
        #
        # modified_dict = {}
        # for key, value in pretrained_state_dict.items():
        #     modified_key = key.replace("stage2", "layer1") \
        #         .replace("stage3", "layer2").replace("stage4", "layer3") \
        #         .replace("branch", "b").replace("conv1", "layer0").replace("conv5", "layer4")
        #     modified_dict[modified_key] = value
        # print(self.load_state_dict(modified_dict, strict=False))

        # if self.w2 == True:
        #     # self-defined pretrained model for Wide ShuffleNetV2 BaseNodes
        #     pretrained_state_dict = torch.load('/rscratch/'
        #         'zhendong/yaohuic/CenterNet/src/pretrain_logs/shufflenetv2_w2_512_trial1/model_best.pth.tar')['state_dict']
        #     print('=> loading self-defined wide ShuffleNetV2 pretrained model')
        # else:
        #     # self-defined pretrained model for ShuffleNetV2 BaseNodes
        #     pretrained_state_dict = torch.load('/rscratch/'
        #         'zhendong/yaohuic/CenterNet/src/pretrain_logs/shufflenetv2_1_512_trial1/model_best.pth.tar')['state_dict']
        #     print('=> loading self-defined pretrained model')
        #
        # modified_dict = {}
        # for key, value in pretrained_state_dict.items():
        #     modified_key = key.replace('module.', '').replace("stage2", "layer1") \
        #         .replace("stage3", "layer2").replace("stage4", "layer3") \
        #         .replace("branch", "b").replace("conv1", "layer0").replace("conv5", "layer4")
        #     modified_dict[modified_key] = value
        # print(self.load_state_dict(modified_dict, strict=False))


        # pretrained PyTorchCV model for ShuffleNetV2 BaseNodes
        if self.w2 == True:
            model_name = "shufflenetv2_w2"
        else:
            model_name = "shufflenetv2_w1"
        pretrained_state_dict = ptcv_get_model(model_name, pretrained=True).state_dict()
        print('=> loading PyTorchCV pretrained model {}'.format(model_name))
        modified_dict = {}
        for key, value in pretrained_state_dict.items():
            modified_key = key.replace("features.stage1.", "layer1.") \
                .replace("features.stage2.", "layer2.").replace("features.stage3.", "layer3.") \
                .replace("unit1.", "0.").replace("unit2.", "1.").replace("unit3.", "2.") \
                .replace("unit4.", "3.").replace("unit5.", "4.").replace("unit6.", "5.") \
                .replace("unit7.", "6.").replace("unit8.", "7.")  \
                .replace("compress_layer0", "b2.0") \
                .replace("dw_conv2", "b2.3").replace("compress_bn1", "b2.1") \
                .replace("dw_bn2", "b2.4").replace("compress_conv1", "b2.0") \
                .replace("expand_conv3", "b2.5").replace("expand_bn3", "b2.6") \
                .replace("dw_conv4", "b1.0").replace("dw_bn4", "b1.1") \
                .replace("expand_conv5", "b1.2").replace("expand_bn5", "b1.3") \
                .replace("features.final_block.conv", "layer4.0").replace("features.final_block.bn", "layer4.1") \
                .replace("features.init_block.conv.conv", "layer0.0").replace("features.init_block.conv.bn", "layer0.1")
            modified_dict[modified_key] = value
        print(self.load_state_dict(modified_dict, strict=False))
Example #21
    def __init__(self,
                 version='b0',
                 feature_levels=(3, 4, 5),
                 pretrained=True,
                 **kwargs):
        super().__init__()
        _check_levels(feature_levels)
        self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
        self.feature_levels = feature_levels
        name = 'efficientnet_%sc' % version
        backbone = ptcv_get_model(name, pretrained=pretrained)
        del backbone.output
        features = backbone.features
        self._kernel_sizes = [3]
        self._strides = [2]
        self.layer1 = nn.Sequential(
            features.init_block.conv,
            features.stage1,
            features.stage2.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage2.unit1.kernel_size)
        self._strides.append(features.stage2.unit1.stride)
        self.layer2 = nn.Sequential(
            features.stage2.unit1.conv2,
            features.stage2.unit1.se,
            features.stage2.unit1.conv3,
            features.stage2[1:],
            features.stage3.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage3.unit1.kernel_size)
        self._strides.append(features.stage3.unit1.stride)
        self.layer3 = nn.Sequential(
            features.stage3.unit1.conv2,
            features.stage3.unit1.se,
            features.stage3.unit1.conv3,
            features.stage3[1:],
            features.stage4.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage4.unit1.kernel_size)
        self._strides.append(features.stage4.unit1.stride)
        self.layer4 = nn.Sequential(
            features.stage4.unit1.conv2,
            features.stage4.unit1.se,
            features.stage4.unit1.conv3,
            features.stage4[1:],
            features.stage5.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage5.unit1.kernel_size)
        self._strides.append(features.stage5.unit1.stride)
        self.layer5 = nn.Sequential(features.stage5.unit1.conv2,
                                    features.stage5.unit1.se,
                                    features.stage5.unit1.conv3,
                                    features.stage5[1:], features.final_block)

        self.out_channels = np.array([
            get_out_channels(getattr(self, ("layer%d" % i)))
            for i in feature_levels
        ])
Example #22
  def load_network(teacher=True):
      if args.teacher_arch == 'densenet' and teacher :
          net = ptcv_get_model("densenet40_k36_bc_cifar10", pretrained=True)
          net = ReturnLayers(net).to(device)
      elif args.teacher_arch == 'wrn' and teacher :
          net_checkpoint = torch.load('checkpoints/wrn_40_2_T.t7')
          start_epoch = net_checkpoint['epoch']
          net = WideResNet(net_checkpoint['depth'], net_checkpoint['width'], num_classes=net_checkpoint['num_classes'], dropRate=0).to(device)
          net.load_state_dict(net_checkpoint['state'])
      elif args.teacher_arch == 'darts' and teacher and not args.fisher and not args.fisher_teacher:
          net_checkpoint = torch.load('checkpoints/%s.t7' % args.teacher_checkpoint)
          genotype = eval("genotypes.%s" % args.std_arch)
          net = Network(args.init_channels, num_classes, args.teach_depth, args.auxiliary, genotype).to(device)
          net.load_state_dict(net_checkpoint['state'])
      elif args.teacher_arch == 'darts' and teacher and args.fisher:
          net = rank_at_param_goal(args.std_arch, args.init_channels, 10, 10, args.auxiliary, \
                                   args.cifar_loc, args.batch_size, 100, args.dataset, args.param_goal, args.teacher_checkpoint)
      elif args.teacher_arch == 'darts' and teacher and args.fisher_teacher:
          net_checkpoint = torch.load('checkpoints/%s.t7' % args.teacher_checkpoint)
          genotype = eval("genotypes.%s" % args.std_arch)
          net = Network(args.init_channels, num_classes, args.student_depth, args.auxiliary, genotype).to(device)
          with open(args.groups_file, 'r+') as f:
            groups = f.readlines()
            group = 0
            for cell in net.cells:
                for op in cell._ops:
                    if isinstance(op, SepConv) or isinstance(op, DilConv):
                      op.update(int(groups[group]))
                      group +=1
          net.drop_path_prob = 0.2
          data = torch.rand(2,3,32,32).cuda()
          net(data)
          net.load_state_dict(net_checkpoint['state'])

      elif args.student_arch == 'densenet' and not teacher :
          net = ptcv_get_model("densenet100_k12_bc_cifar10", pretrained=False)
          net = ReturnLayers(net).to(device)
      elif args.student_arch == 'wrn' and not teacher :
          net = WideResNet(args.student_depth, args.student_width, num_classes=num_classes, dropRate=0).to(device)
      elif args.student_arch == 'darts' and not teacher:
          genotype = eval("genotypes.%s" % args.std_arch)
          net = Network(args.init_channels, num_classes, args.student_depth, args.auxiliary, genotype).to(device)
          net.drop_path_prob = 0.2
      return net
Example #23
def resattnet56(in_channels=3, num_classes=1000, pretrained=True):
    model = ptcv_get_model("resattnet56", pretrained=False)

    if pretrained is True:
        state = torch.hub.load_state_dict_from_url(
            "https://github.com/phamquiluan/ResidualAttentionNetwork/releases/download/v0.1.0/resattnet56.pth"
        )
        model.load_state_dict(state["state_dict"])
    model.output = nn.Linear(2048, num_classes)
    return model
Example #24
    def __init__(self,
                 version='b0',
                 feature_levels=(3, 4, 5),
                 pretrained=True,
                 frozen_stages=-1,
                 **kwargs):
        super().__init__()
        self.feature_levels = feature_levels
        self.frozen_stages = frozen_stages
        name = 'efficientnet_%sc' % version
        backbone = ptcv_get_model(name, pretrained=pretrained)
        del backbone.output
        features = backbone.features
        self._kernel_sizes = [3]
        self._strides = [2]
        self.layer1 = nn.Sequential(
            features.init_block.conv,
            features.stage1,
            features.stage2.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage2.unit1.kernel_size)
        self._strides.append(features.stage2.unit1.stride)
        self.layer2 = nn.Sequential(
            features.stage2.unit1.conv2,
            features.stage2.unit1.se,
            features.stage2.unit1.conv3,
            features.stage2[1:],
            features.stage3.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage3.unit1.kernel_size)
        self._strides.append(features.stage3.unit1.stride)
        self.layer3 = nn.Sequential(
            features.stage3.unit1.conv2,
            features.stage3.unit1.se,
            features.stage3.unit1.conv3,
            features.stage3[1:],
            features.stage4.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage4.unit1.kernel_size)
        self._strides.append(features.stage4.unit1.stride)
        self.layer4 = nn.Sequential(
            features.stage4.unit1.conv2,
            features.stage4.unit1.se,
            features.stage4.unit1.conv3,
            features.stage4[1:],
            features.stage5.unit1.conv1,
        )
        self._kernel_sizes.append(features.stage5.unit1.kernel_size)
        self._strides.append(features.stage5.unit1.stride)
        self.layer5 = nn.Sequential(features.stage5.unit1.conv2,
                                    features.stage5.unit1.se,
                                    features.stage5.unit1.conv3,
                                    features.stage5[1:], features.final_block)

        self._freeze_stages()
Example #25
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)
    logger = get_logger(cfg.log_level)

    # init distributed environment if necessary
    if args.launcher == 'none':
        dist = False
        logger.info('Disabled distributed training.')
    else:
        dist = True
        init_dist(**cfg.dist_params)
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        if rank != 0:
            logger.setLevel('ERROR')
        logger.info('Enabled distributed training.')

    try:
        dataset = getattr(data, cfg.dataset_type)
        train_loader, val_loader = dataset(cfg.data_root, batch_size=cfg.total_bs, imgsize=cfg.img_size)
    except Exception:
        print("Dataset type {} is not implemented.".format(cfg.dataset_type))
        exit()

    # build model
    try:
        model = ptcv_get_model(cfg.model.type, pretrained=cfg.model.pretrained)
    except Exception:
        print("Model {} is not implemented.".format(cfg.model.type))
        exit()

    if dist:
        model = DistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()])
    else:
        model = DataParallel(model, device_ids=cfg.gpus).cuda()

    # build runner and register hooks
    runner = Runner(model,batch_processor,cfg.optimizer,cfg.work_dir,log_level=cfg.log_level)
    runner.register_training_hooks(
        lr_config=cfg.lr_config,
        optimizer_config=cfg.optimizer_config,
        checkpoint_config=cfg.checkpoint_config,
        log_config=cfg.log_config)
    if dist:
        runner.register_hook(DistSamplerSeedHook())

    # load param (if necessary) and run
    if cfg.get('resume_from') is not None:
        runner.resume(cfg.resume_from)
    elif cfg.get('load_from') is not None:
        runner.load_checkpoint(cfg.load_from)

    runner.run([train_loader, val_loader], cfg.workflow, cfg.total_epochs)
Example #26
    def __init__(self, num_classes, pretrained=True, size=224):
        super(PretrainedMobilenetWD4, self).__init__()
        self.size = size
        mobilenet = ptcv_get_model("mobilenet_wd4", pretrained=pretrained)

        if self.size == 224:
            self.model = nn.Sequential(*(list(mobilenet.children())[0]))
        else:
            self.model = nn.Sequential(*(list(mobilenet.children())[0][:-1]))
            self.avgpool = nn.AvgPool2d(int(self.size / 32), stride=1)

        self.last_fc = nn.Linear(256, num_classes)
Example #27
def cbam_resnet50(in_channels, num_classes, pretrained=1):
    if pretrained==1:
        model = ptcv_get_model("cbam_resnet50", pretrained=True)
        model.output = nn.Linear(2048, num_classes)
        print('using pretrained imagenet weights')
    else:
        model = ptcv_get_model("cbam_resnet50", pretrained=False)
        model.output = nn.Linear(2048, num_classes)
        if pretrained==2:
            print('using pretrained lfw weights')
            state_dict = torch.load('saved/checkpoints/cbam_resnet50__n_2021Apr20_00.24')['net']
            model.load_state_dict(state_dict)

    # freeze params
    # for name,param in model.named_parameters():
        # if "stage4" not in name:
        #     param.requires_grad = False
    # model.output = nn.Linear(2048, num_classes)
    # model.output = nn.Sequential(nn.Dropout(p=0.5, inplace=False), nn.Linear(2048, num_classes))

    return model
Example #28
    def __init__(self, num_classes, pretrained=True, size=224):
        super(PretrainedSeresNext50, self).__init__()
        self.size = size
        seresnext50 = ptcv_get_model("seresnext50_32x4d", pretrained=pretrained)

        if self.size == 224:
            self.model = nn.Sequential(*(list(seresnext50.children())[0]))
        else:
            self.model = nn.Sequential(*(list(seresnext50.children())[0][:-1]))
            self.avgpool = nn.AvgPool2d(int(self.size / 32), stride=1)

        self.last_fc = nn.Linear(2048, num_classes)
Example #29
    def __init__(self, feature_levels=(3, 4, 5), pretrained=False, **kwargs):
        super().__init__()
        _check_levels(feature_levels)
        self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
        self.out_levels = feature_levels
        if pretrained:
            net = ptcv_get_model("darknet53", pretrained=True)
            del net.output
            net = net.features
            self.layer1 = nn.Sequential(
                net.init_block,
                net.stage1,
            )
            self.layer2 = net.stage2
            self.layer3 = net.stage3
            self.layer4 = net.stage4
            self.layer5 = net.stage5
        else:
            backbone = BDarknet(num_classes=1, **kwargs)
            del backbone.fc
            self.layer1 = nn.Sequential(
                backbone.conv0,
                backbone.down1,
                backbone.layer1,
            )

            self.layer2 = nn.Sequential(
                backbone.down2,
                backbone.layer2,
            )

            self.layer3 = nn.Sequential(
                backbone.down3,
                backbone.layer3,
            )

            self.layer4 = nn.Sequential(
                backbone.down4,
                backbone.layer4,
            )

            self.layer5 = nn.Sequential(
                backbone.down5,
                backbone.layer5,
            )
        self.out_channels = [
            get_out_channels(getattr(self, ("layer%d" % i)))
            for i in feature_levels
        ]
Example #30
def maskrcnn_inceptionresnetv2_rpn(num_classes=2):
    backbone = ptcv_get_model("InceptionResNetV2", pretrained=True).features
    backbone.out_channels = 1536

    anchor_generator = AnchorGenerator(sizes=((128, 256, 512, 768), ),
                                       aspect_ratios=((0.5, 1.0, 2.0), ))

    model = MaskRCNN(
        backbone,
        num_classes=num_classes,

        # transform parameters
        min_size=800,
        max_size=1333,
        image_mean=None,
        image_std=None,

        # RPN parameters
        rpn_anchor_generator=anchor_generator,
        rpn_head=None,
        rpn_pre_nms_top_n_train=2000,
        rpn_pre_nms_top_n_test=1000,
        rpn_post_nms_top_n_train=2000,
        rpn_post_nms_top_n_test=1000,
        rpn_nms_thresh=0.7,
        rpn_fg_iou_thresh=0.7,
        rpn_bg_iou_thresh=0.3,
        rpn_batch_size_per_image=256,
        rpn_positive_fraction=0.5,

        # Box parameters
        box_roi_pool=None,
        box_head=None,
        box_predictor=None,
        box_score_thresh=0.05,
        box_nms_thresh=0.5,
        box_detections_per_img=100,
        box_fg_iou_thresh=0.5,
        box_bg_iou_thresh=0.5,
        box_batch_size_per_image=512,
        box_positive_fraction=0.25,
        bbox_reg_weights=None,

        # Mask parameters
        mask_roi_pool=None,
        mask_head=None,
        mask_predictor=None)

    return model
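A hedged inference sketch for the detector above; torchvision's MaskRCNN takes a list of 3xHxW tensors in eval mode and returns one dict of boxes, labels, scores and masks per image:

import torch

model = maskrcnn_inceptionresnetv2_rpn(num_classes=2)
model.eval()
with torch.no_grad():
    predictions = model([torch.rand(3, 800, 800)])  # list with a single image tensor
print(predictions[0].keys())  # expected keys: boxes, labels, scores, masks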