Code Example #1
    def __init__(self, num_class):
        super(multiscale_se_resnext_cat, self).__init__()

        self.base_model1 = se_resnext50_32x4d(9, None)
        self.base_model2 = se_resnext50_32x4d(9, None)
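        # Cadene-style positional arguments: presumably num_classes=9 and pretrained=None (no ImageNet weights)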

        self.dropout = nn.Dropout(0.5)
        self.classifier = nn.Linear(4096, 1024)
Code Example #2
def load_se_resnet50(pretrained_path=None):

    model = se_resnext50_32x4d()

    #    load_from_path(pretrained_path, model)
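    # note: pretrained_path is effectively unused while the load call above stays commented out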

    return model
Code Example #3
    def __init__(self, num_classes=3, num_filters=32,
                 pretrained=True, is_deconv=True):
        super().__init__()
        self.num_classes = num_classes
        pretrain = 'imagenet' if pretrained is True else None
        self.encoder = se_resnext50_32x4d(num_classes=1000, pretrained=pretrain)
        bottom_channel_nr = 2048

        self.conv1 = self.encoder.layer0
        #self.se_e1 = SCSEBlock(64)
        self.conv2 = self.encoder.layer1
        #self.se_e2 = SCSEBlock(64 * 4)
        self.conv3 = self.encoder.layer2
        #self.se_e3 = SCSEBlock(128 * 4)
        self.conv4 = self.encoder.layer3
        #self.se_e4 = SCSEBlock(256 * 4)
        self.conv5 = self.encoder.layer4
        #self.se_e5 = SCSEBlock(512 * 4)

        self.center = DecoderCenter(bottom_channel_nr, num_filters * 8 * 2, num_filters * 8, False)
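        # presumably a U-Net-style decoder: dec5-dec2 concatenate upsampled features with the 2048/1024/512/256-channel encoder stages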

        self.dec5 = DecoderBlockV(bottom_channel_nr + num_filters * 8, num_filters * 8 * 2, num_filters * 2, is_deconv)
        #self.se_d5 = SCSEBlock(num_filters * 2)
        self.dec4 = DecoderBlockV(bottom_channel_nr // 2 + num_filters * 2, num_filters * 8, num_filters * 2, is_deconv)
        #self.se_d4 = SCSEBlock(num_filters * 2)
        self.dec3 = DecoderBlockV(bottom_channel_nr // 4 + num_filters * 2, num_filters * 4, num_filters * 2, is_deconv)
        #self.se_d3 = SCSEBlock(num_filters * 2)
        self.dec2 = DecoderBlockV(bottom_channel_nr // 8 + num_filters * 2, num_filters * 2, num_filters * 2, is_deconv)
        #self.se_d2 = SCSEBlock(num_filters * 2)
        self.dec1 = DecoderBlockV(num_filters * 2, num_filters, num_filters * 2, is_deconv)
        #self.se_d1 = SCSEBlock(num_filters * 2)
        self.dec0 = ConvRelu(num_filters * 10, num_filters * 2)
        self.final = nn.Conv2d(num_filters * 2, num_classes, kernel_size=1)
Code Example #4
def main():
  model = se_resnext50_32x4d()
  num_ftrs = model.last_linear.in_features

  model.last_linear = nn.Linear(num_ftrs, len(class_names))
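  # freeze the whole backbone first, then re-enable gradients only for layer3, layer4 and the new head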
  
  for param in model.parameters():
    param.requires_grad = False
  for param in model.layer3.parameters():
    param.requires_grad = True
  for param in model.layer4.parameters():
    param.requires_grad = True
  for param in model.last_linear.parameters():
    param.requires_grad = True

  criterion = nn.CrossEntropyLoss()

  if use_gpu:
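    # caution: cudnn.benchmark favors speed over reproducibility, so pairing it with cudnn.deterministic = True is usually contradictory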
    cudnn.benchmark = True
    cudnn.deterministic = True
    criterion = criterion.cuda()
    model = model.cuda()
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))

  optimizer_ft = optim.SGD([p for p in model.parameters() if p.requires_grad],
                           lr=0.01, momentum=0.9, weight_decay=5e-5)

  rop_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer_ft, factor=0.5)

  if not os.path.isdir('checkpoint'):
    os.mkdir('checkpoint')

  train_model(model, criterion, optimizer_ft, rop_lr_scheduler, num_epochs=100)
Code Example #5
def main():
    args = get_parse()
    catalyst.utils.set_global_seed(args.seed)
    catalyst.utils.prepare_cudnn(deterministic=True)

    print('Make Data set data frame')
    df, class_names = make_df(data_root=args.data_rootdir)
    num_class = len(class_names)

    print('Get data loaders')
    loaders = get_train_valid_loaders(
        df=df,
        test_size=0.2,
        random_state=args.seed,
        data_root=args.data_rootdir,
        num_class=num_class,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        img_size=args.img_size
    )

    print('Make model')
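    # the 512 * 16 = 8192 head presumably reflects 256x256 inputs: the stock senet's fixed 7x7 average pool then leaves a 2x2 map, i.e. 2048 * 4 flattened features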
    if args.frn:
        model = se_resnext50_32x4d_frn(pretrained=None)
        model.last_linear = nn.Linear(512 * 16, num_class)
    else:
        model = se_resnext50_32x4d()
        model.last_linear = nn.Linear(512 * 16, num_class)

    print('Get optimizer and scheduler')
    # learning rate for FRN is very very sensitive !!!
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-5 if args.frn else 3e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer=optimizer,
        T_max=args.num_epochs,
        eta_min=1e-6 if args.frn else 1e-5,
        last_epoch=-1
    )

    log_base = './output/cls'
    dir_name = f'seresnext50{"_frn" if args.frn else ""}_bs_{args.batch_size}_fp16_{args.fp16}'

    print('Start training...')
    runner = SupervisedRunner(device=catalyst.utils.get_device())
    runner.train(
        model=model,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        logdir=os.path.join(log_base, dir_name),
        callbacks=get_callbacks(num_classes=num_class),
        num_epochs=args.num_epochs,
        main_metric="accuracy01",
        minimize_metric=False,
        fp16=dict(opt_level="O1") if args.fp16 else None,
        verbose=False
    )
Code Example #6
def test_clsmodel():
    # Base Model
    model = se_resnext50_32x4d(pretrained=None)
    model.last_linear = torch.nn.Linear(512 * 16, 2)
    summary(model, (3, 256, 256), batch_size=2)

    # Use FRN Model
    model = se_resnext50_32x4d_frn(pretrained=None)
    model.last_linear = torch.nn.Linear(512 * 16, 2)
    summary(model, (3, 256, 256), batch_size=2)
Code Example #7
    def __init__(self, backbone1, backbone2, drop, pretrained=True):
        super(MultiModalNet, self).__init__()

        self.visit_model = DPN26()
        if backbone1 == 'se_resnext101_32x4d':
            self.img_encoder = se_resnext101_32x4d(9, None)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnext50_32x4d':
            self.img_encoder = se_resnext50_32x4d(9, None)

            print(
                "load pretrained model from /home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth"
            )
            state_dict = torch.load(
                '/home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth')

            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
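            # strict=False: the popped 1000-class last_linear keys are simply skipped when loading into the 9-class model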
            self.img_encoder.load_state_dict(state_dict, strict=False)

            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnext26_32x4d':
            self.img_encoder = se_resnext26_32x4d(9, None)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'multiscale_se_resnext':
            self.img_encoder = multiscale_se_resnext(9)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'multiscale_se_resnext_cat':
            self.img_encoder = multiscale_se_resnext(9)
            self.img_fc = nn.Linear(1024, 256)

        elif backbone1 == 'multiscale_se_resnext_HR':
            self.img_encoder = multiscale_se_resnext_HR(9)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnet50':
            self.img_encoder = se_resnet50(9, None)
            print(
                "load pretrained model from /home/zxw/2019BaiduXJTU/se_resnet50-ce0d4300.pth"
            )
            state_dict = torch.load(
                '/home/zxw/2019BaiduXJTU/se_resnet50-ce0d4300.pth')

            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            self.img_encoder.load_state_dict(state_dict, strict=False)

            self.img_fc = nn.Linear(2048, 256)

        self.dropout = nn.Dropout(0.5)
        self.cls = nn.Linear(512, 9)
Code Example #8
    def __init__(self, num_classes=1):
        super(model50A_slim_DeepSupervion, self).__init__()

        self.num_classes = num_classes
        self.encoder = se_resnext50_32x4d()

        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1)

        self.conv2 = self.encoder.layer1
        self.conv3 = self.encoder.layer2
        self.conv4 = self.encoder.layer3
        self.conv5 = self.encoder.layer4

        self.center_global_pool = nn.AdaptiveAvgPool2d([1, 1])
        self.center_conv1x1 = nn.Conv2d(512 * 4, 64, kernel_size=1)
        self.center_fc = nn.Linear(64, 2)
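        # auxiliary classification head (deep supervision): pooled bottleneck features -> 64 -> 2 logits, presumably empty vs. non-empty mask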

        self.center = nn.Sequential(
            nn.Conv2d(512 * 4, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512), nn.ReLU(inplace=True),
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256),
            nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2))

        self.dec5_1x1 = nn.Sequential(nn.Conv2d(512 * 4, 512, kernel_size=1),
                                      nn.BatchNorm2d(512),
                                      nn.ReLU(inplace=True))
        self.decoder5 = Decoder_bottleneck(256 + 512, 512, 64)

        self.dec4_1x1 = nn.Sequential(nn.Conv2d(256 * 4, 256, kernel_size=1),
                                      nn.BatchNorm2d(256),
                                      nn.ReLU(inplace=True))
        self.decoder4 = Decoder_bottleneck(64 + 256, 256, 64)

        self.dec3_1x1 = nn.Sequential(nn.Conv2d(128 * 4, 128, kernel_size=1),
                                      nn.BatchNorm2d(128),
                                      nn.ReLU(inplace=True))
        self.decoder3 = Decoder_bottleneck(64 + 128, 128, 64)

        self.dec2_1x1 = nn.Sequential(nn.Conv2d(64 * 4, 64, kernel_size=1),
                                      nn.BatchNorm2d(64),
                                      nn.ReLU(inplace=True))
        self.decoder2 = Decoder_bottleneck(64 + 64, 64, 64)

        self.decoder1 = Decoder_bottleneck(64, 32, 64)

        self.logits_no_empty = nn.Sequential(
            nn.Conv2d(320, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(64, 1, kernel_size=1, padding=0))

        self.logits_final = nn.Sequential(
            nn.Conv2d(320 + 64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True), nn.Conv2d(64, 1, kernel_size=1, padding=0))
Code Example #9
File: utils.py Project: sweatybridge/pneumonia
    def __init__(self, weights_path, device, n_classes=2, save=None):
        super().__init__()

        self.model = se_resnext50_32x4d(pretrained=None)
        self.model.load_state_dict(
            torch.load(weights_path, map_location=device))
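        # the checkpoint must match the stock 1000-class architecture, since the head is only replaced below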
        if save is not None:
            torch.save(self.model.state_dict(), save)

        self.model.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.model.last_linear = nn.Linear(self.model.last_linear.in_features,
                                           n_classes)
Code Example #10
    def __init__(self,
                 num_classes=5,
                 num_filters=32,
                 cls_only=False,
                 is_deconv=False):
        super().__init__()
        self.num_classes = num_classes
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = se_resnext50_32x4d()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = self.encoder.layer0
        self.conv2 = self.encoder.layer1
        self.conv3 = self.encoder.layer2
        self.conv4 = self.encoder.layer3
        self.conv5 = self.encoder.layer4
        self.cls_only = cls_only

        self.classifer = nn.Sequential(
            se_resnext50_32x4d().layer4,
            nn.Conv2d(2048, 2, 1, stride=1, padding=1, bias=True),
            GlobalAvgPool2d())

        self.center = DecoderBlockBN(2048, num_filters * 8 * 2,
                                     num_filters * 8, is_deconv)

        self.dec5 = DecoderBlockBN(2048 + num_filters * 8, num_filters * 8 * 2,
                                   num_filters * 8, is_deconv)
        self.dec4 = DecoderBlockBN(1024 + num_filters * 8, num_filters * 8 * 2,
                                   num_filters * 8, is_deconv)
        self.dec3 = DecoderBlockBN(512 + num_filters * 8, num_filters * 4 * 2,
                                   num_filters * 2, is_deconv)
        self.dec2 = DecoderBlockBN(256 + num_filters * 2, num_filters * 2 * 2,
                                   num_filters * 2 * 2, is_deconv)
        self.dec1 = DecoderBlockBN(num_filters * 2 * 2, num_filters * 2 * 2,
                                   num_filters, is_deconv)
        self.dec0 = ConvReluBn(num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
        self.drop = nn.Dropout2d(0.3)
Code Example #11
File: model.py Project: lzj1769/PANDA
    def __init__(self, arch, num_classes=1, pretrained=True):
        super().__init__()

        # load the selected backbone
        if arch == 'se_resnext50_32x4d':
            if pretrained:
                self.base = se_resnext50_32x4d()
            else:
                self.base = se_resnext50_32x4d(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'se_resnext101_32x4d':
            if pretrained:
                self.base = se_resnext101_32x4d()
            else:
                self.base = se_resnext101_32x4d(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'inceptionv4':
            if pretrained:
                self.base = inceptionv4()
            else:
                self.base = inceptionv4(pretrained=None)
            self.nc = self.base.last_linear.in_features

        elif arch == 'inceptionresnetv2':
            if pretrained:
                self.base = inceptionresnetv2()
            else:
                self.base = inceptionresnetv2(pretrained=None)
            self.nc = self.base.last_linear.in_features

        self.logit = nn.Sequential(AdaptiveConcatPool2d(1), Flatten(),
                                   nn.BatchNorm1d(2 * self.nc),
                                   nn.Dropout(0.5),
                                   nn.Linear(2 * self.nc, 512), Mish(),
                                   nn.BatchNorm1d(512), nn.Dropout(0.5),
                                   nn.Linear(512, 1))
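        # AdaptiveConcatPool2d presumably concatenates avg- and max-pooled features, hence the 2 * self.nc input width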
Code Example #12
    def __init__(self, num_class, pretrain=True):
        super(multiscale_se_resnext_HR, self).__init__()

        self.base_model = se_resnext50_32x4d(9, None)

        if pretrain == True:
            print(
                "load model from /home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth"
            )
            state_dict = torch.load(
                '/home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth')

            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
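            # the 1000-class head is dropped; strict=False then loads only the matching backbone weights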
            self.base_model.load_state_dict(state_dict, strict=False)

        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
Code Example #13
    def __init__(self, num_classes=1):
        super(model50A_DeepSupervion, self).__init__()

        self.num_classes = num_classes
        self.encoder = se_resnext50_32x4d()

        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1)

        self.conv2 = self.encoder.layer1
        self.conv3 = self.encoder.layer2
        self.conv4 = self.encoder.layer3
        self.conv5 = self.encoder.layer4

        self.center_global_pool = nn.AdaptiveAvgPool2d([1, 1])
        self.center_conv1x1 = nn.Conv2d(512 * 4, 64, kernel_size=1)
        self.center_fc = nn.Linear(64, 2)

        self.center = nn.Sequential(nn.Conv2d(512 * 4, 512, kernel_size=3, padding=1),
                                    nn.BatchNorm2d(512),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(512, 256, kernel_size=3, padding=1),
                                    nn.BatchNorm2d(256),
                                    nn.ReLU(inplace=True),
                                    nn.MaxPool2d(kernel_size=2, stride=2))

        self.decoder5 = Decoder(256 + 512 * 4, 512, 64)
        self.decoder4 = Decoder(64 + 256 * 4, 256, 64)
        self.decoder3 = Decoder(64 + 128 * 4, 128, 64)
        self.decoder2 = Decoder(64 + 64 * 4, 64, 64)
        self.decoder1 = Decoder(64, 32, 64)
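        # the five 64-channel decoder outputs are presumably concatenated (5 * 64 = 320) before the 1x1 logits heads below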

        self.logits_no_empty = nn.Sequential(nn.Conv2d(320, 64, kernel_size=3, padding=1),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(64, 1, kernel_size=1, padding=0))

        self.logits_final = nn.Sequential(nn.Conv2d(320+64, 64, kernel_size=3, padding=1),
                                         nn.ReLU(inplace=True),
                                         nn.Conv2d(64, 1, kernel_size=1, padding=0))
Code Example #14
File: model.py Project: gittigxuy/imet
def create_model(config: Any, pretrained: bool) -> Any:
    dropout = config.model.dropout

    # support the deprecated model
    if config.version == '2b_se_resnext50':
        model = se_resnext50_32x4d(
            pretrained='imagenet' if pretrained else None)
        model.avg_pool = nn.AdaptiveAvgPool2d(1)
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      config.model.num_classes)

        model = torch.nn.DataParallel(model)
        return model

    if not IN_KERNEL:
        model = get_model(config.model.arch, pretrained=pretrained)
    else:
        model = get_model(config.model.arch,
                          pretrained=pretrained,
                          root='../input/pytorchcv-models/')

    if config.model.arch == 'xception':
        model.features[-1].pool = nn.AdaptiveAvgPool2d(1)
    else:
        model.features[-1] = nn.AdaptiveAvgPool2d(1)

    if config.model.arch == 'pnasnet5large':
        if dropout == 0.0:
            model.output = nn.Linear(model.output[-1].in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output[-1].in_features,
                          config.model.num_classes))
    elif config.model.arch == 'xception':
        if dropout < 0.1:
            model.output = nn.Linear(2048, config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout), nn.Linear(2048, config.model.num_classes))
    elif config.model.arch.startswith('inception'):
        if dropout < 0.1:
            model.output = nn.Linear(model.output[-1].in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output[-1].in_features,
                          config.model.num_classes))
    else:
        if dropout < 0.1:
            model.output = nn.Linear(model.output.in_features,
                                     config.model.num_classes)
        else:
            model.output = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(model.output.in_features, config.model.num_classes))

    model = torch.nn.DataParallel(model)
    return model
Code Example #15
def initialize_model(model_name,
                     embedding_dim,
                     feature_extracting,
                     use_pretrained=True):
    if model_name == "densenet161":
        model_ft = models.densenet161(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)
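        # set_parameter_requires_grad presumably freezes the backbone when feature_extracting is True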

        #print(model_ft)
        num_features = model_ft.classifier.in_features
        print(num_features)
        #print(embedding_dim)
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
        #print(model_ft)
        if model_name == "densenet169":
            model_ft = models.densenet161(pretrained=use_pretrained,
                                          memory_efficient=True)
            set_parameter_requires_grad(model_ft, feature_extracting)

            #print(model_ft)
            num_features = model_ft.classifier.in_features
            print(num_features)
            #print(embedding_dim)
            model_ft.classifier = nn.Linear(num_features, embedding_dim)
            #print(model_ft)
    if model_name == "densenet121":
        model_ft = models.densenet121(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)

        #print(model_ft)
        num_features = model_ft.classifier.in_features
        print(num_features)
        #print(embedding_dim)
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
        #print(model_ft)
    if model_name == "densenet201":
        model_ft = models.densenet201(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)

        #print(model_ft)
        num_features = model_ft.classifier.in_features
        print(num_features)
        #print(embedding_dim)
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
        #print(model_ft)
    elif model_name == "resnet101":
        model_ft = models.resnet101(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
        print(model_ft)
    elif model_name == "resnet34":
        model_ft = models.resnet34(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
        print(model_ft)
    elif model_name == "adl_resnet50":
        model_ft = adl_resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
        print(model_ft)

    elif model_name == "inceptionv3":
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "seresnext":
        model_ft = se_resnext101_32x4d(num_classes=1000)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.last_linear.in_features
        model_ft.last_linear = nn.Linear(num_features, embedding_dim)
    elif model_name == "seresnext50":
        model_ft = se_resnext50_32x4d(num_classes=1000)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.last_linear.in_features
        model_ft.last_linear = nn.Linear(num_features, embedding_dim)
    elif model_name == "googlenet":
        model_ft = models.googlenet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        #print(model_ft)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)

    elif model_name == "mobilenet2":
        model_ft = models.mobilenet_v2(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)

        num_features = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_features, embedding_dim)
        print(model_ft)

    elif model_name == "mnasnet":
        model_ft = mnasnet1_0(pretrained=use_pretrained)
        #model_ft = MnasNet()
        set_parameter_requires_grad(model_ft, feature_extracting)

        print(model_ft.classifier[0])
        print(model_ft.classifier[1])

        num_features = model_ft.classifier[1].in_features
        print(num_features)
        print(embedding_dim)
        #model_ft.classifier[0] = nn.Dropout(p=0.2, inplace=False)
        model_ft.classifier[1] = nn.Linear(num_features, embedding_dim)
        print(model_ft)
    elif model_name == "adl_googlenet":
        model_ft = GoogLeNet()
        set_parameter_requires_grad(model_ft, feature_extracting)
        print(model_ft)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)

    else:
        raise ValueError(f"Unknown model_name: {model_name}")

    return model_ft
Code Example #16
import os

import torch
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter

DATA_DIR = "/home/krf/dataset/BALL/"
traindir = DATA_DIR + "train"
valdir = DATA_DIR + "val"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
BATCH_SIZE = 32
WORKERS = 4
START = 0
EPOCHS = 700
PRINT_FREQ = 10

model = senet.se_resnext50_32x4d(num_classes=2)
# data augmentation via random transformations
train_tf = TransformImage(model,
                          random_crop=True,
                          random_hflip=True,
                          random_vflip=True,
                          random_rotate=True,
                          preserve_aspect_ratio=True)
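# TransformImage presumably builds the preprocessing pipeline from the model's
# input_size / mean / std attributes; the random_rotate flag suggests a locally
# modified copy of the pretrainedmodels helper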
train_loader = torch.utils.data.DataLoader(
    #     datasets.ImageFolder(traindir, transforms.Compose([
    # #         transforms.RandomSizedCrop(max(model.input_size)),
    #         transforms.RandomHorizontalFlip(),
    #         transforms.ToTensor(),
    #         normalize,
    #     ])),
    datasets.ImageFolder(traindir, train_tf),