Example #1
 def __init__(self, train_fe=False, feature_extraction_cnn='vgg', normalization=True, last_layer='', use_cuda=True):
     super(FeatureExtraction, self).__init__()
     self.normalization = normalization
     if feature_extraction_cnn == 'vgg':
         self.model = models.vgg16(pretrained=True)
         # keep feature extraction network up to indicated layer
         vgg_feature_layers=['conv1_1','relu1_1','conv1_2','relu1_2','pool1','conv2_1',
                      'relu2_1','conv2_2','relu2_2','pool2','conv3_1','relu3_1',
                      'conv3_2','relu3_2','conv3_3','relu3_3','pool3','conv4_1',
                      'relu4_1','conv4_2','relu4_2','conv4_3','relu4_3','pool4',
                      'conv5_1','relu5_1','conv5_2','relu5_2','conv5_3','relu5_3','pool5']
         if last_layer=='':
             last_layer = 'pool4'
         last_layer_idx = vgg_feature_layers.index(last_layer)
         self.model = nn.Sequential(*list(self.model.features.children())[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101':
         self.model = models.resnet101(pretrained=True)
         resnet_feature_layers = ['conv1',
                                  'bn1',
                                  'relu',
                                  'maxpool',
                                  'layer1',
                                  'layer2',
                                  'layer3',
                                  'layer4']
         if last_layer=='':
             last_layer = 'layer3'
         last_layer_idx = resnet_feature_layers.index(last_layer)
         resnet_module_list = [self.model.conv1,
                               self.model.bn1,
                               self.model.relu,
                               self.model.maxpool,
                               self.model.layer1,
                               self.model.layer2,
                               self.model.layer3,
                               self.model.layer4]
         
         self.model = nn.Sequential(*resnet_module_list[:last_layer_idx+1])
     if feature_extraction_cnn == 'resnet101_v2':
         self.model = models.resnet101(pretrained=True)
         # keep feature extraction network up to layer3 (the 'pool4'-level features); drop layer4, avgpool, fc
         self.model = nn.Sequential(*list(self.model.children())[:-3])
     if feature_extraction_cnn == 'densenet201':
         self.model = models.densenet201(pretrained=True)
         # keep feature extraction network up to denseblock3
         # self.model = nn.Sequential(*list(self.model.features.children())[:-3])
         # keep feature extraction network up to transitionlayer2
         self.model = nn.Sequential(*list(self.model.features.children())[:-4])
     if not train_fe:
         # freeze parameters
         for param in self.model.parameters():
             param.requires_grad = False
     # move to GPU
     if use_cuda:
         self.model = self.model.cuda()
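A minimal standalone sketch of the truncation idea used above (assumes torchvision is available; 'pool4' is index 23 of vgg16.features, matching its position in vgg_feature_layers):

# Sketch: cut a pretrained VGG-16 at pool4 and run a dummy image through it.
import torch
import torch.nn as nn
from torchvision import models

vgg = models.vgg16(pretrained=True)
pool4_idx = 23  # index of 'pool4' in vgg16.features
extractor = nn.Sequential(*list(vgg.features.children())[:pool4_idx + 1])
with torch.no_grad():
    feats = extractor(torch.randn(1, 3, 224, 224))
print(feats.shape)  # torch.Size([1, 512, 14, 14])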
Example #2
    def __init__(self, num_classes=1):
        super(DinkNet101, self).__init__()

        filters = [256, 512, 1024, 2048]
        resnet = models.resnet101(pretrained=True)
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        
        self.dblock = Dblock_more_dilate(2048)

        self.decoder4 = DecoderBlock(filters[3], filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])

        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1)
Example #3
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-101 model.
    """
    model = models.resnet101(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet101'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example #4
    def __init__(self, descriptor_name):
        super(Net, self).__init__()

        # if descriptor_name == 'vgg16':
        #     self.select = ['30']
        #     self.vgg16 = models.vgg16(pretrained=True)
        #     self.sequence = []
        #     for name, layer in self.vgg16.features._modules.items():
        #         self.sequence += [layer]
        #     for name, layer in self.vgg16.classifier._modules.items():
        #         self.sequence += [layer]
        #         break
        #     self.model = nn.Sequential(*self.sequence)

        if descriptor_name == 'vgg16':
            self.select = ['30']
            self.vgg16 = models.vgg16(pretrained=True)
            self.sequence = []
            for name, layer in self.vgg16.features._modules.items():
                self.sequence += [layer]
            for name, layer in self.vgg16.classifier._modules.items():
                if name == '6':
                    break
                self.sequence += [layer]
            layer = nn.Linear(4096, 10)
            # init.xavier_normal(layer.weight.data, gain = 1)
            self.sequence += [layer]

            self.model = nn.Sequential(*self.sequence)

        elif descriptor_name == 'vgg19':
            self.select = ['36']
            self.vgg19 = models.vgg19(pretrained=True)
            self.sequence = []
            for name, layer in self.vgg19.features._modules.items():
                self.sequence += [layer]
            for name, layer in self.vgg19.classifier._modules.items():
                self.sequence += [layer]
                break
            self.model = nn.Sequential(*self.sequence)

        elif descriptor_name == 'resnet50':
            self.select = ['avgpool']
            self.model = models.resnet50(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)
            
        elif descriptor_name == 'resnet101':
            self.select = ['avgpool']
            self.model = models.resnet101(pretrained=True)

        elif descriptor_name == 'resnet152':
            self.select = ['avgpool']
            self.model = models.resnet152(pretrained=True)
            self.model.fc = nn.Linear(2048, 10)
Example #5
 def __init__(self):
   super(ResNet101Fc, self).__init__()
   model_resnet101 = models.resnet101(pretrained=True)
   self.conv1 = model_resnet101.conv1
   self.bn1 = model_resnet101.bn1
   self.relu = model_resnet101.relu
   self.maxpool = model_resnet101.maxpool
   self.layer1 = model_resnet101.layer1
   self.layer2 = model_resnet101.layer2
   self.layer3 = model_resnet101.layer3
   self.layer4 = model_resnet101.layer4
   self.avgpool = model_resnet101.avgpool
   self.__in_features = model_resnet101.fc.in_features
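The forward pass of ResNet101Fc is not shown in this excerpt; a plausible sketch of how the copied modules would be chained into a feature extractor (the helper name below is an assumption, not part of the original class):

# Hypothetical helper: push an input through the modules copied above and
# return pooled features of width fc.in_features (2048 for ResNet-101).
import torch

def extract_resnet101_features(net, x):
    x = net.maxpool(net.relu(net.bn1(net.conv1(x))))
    x = net.layer4(net.layer3(net.layer2(net.layer1(x))))
    x = net.avgpool(x)
    return torch.flatten(x, 1)  # shape (N, 2048)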
Example #6
def get_model(num_classes, model_type='resnet50'):
    if model_type == 'resnet50':
        model = resnet50(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'resnet101':
        model = resnet101(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'resnet152':
        model = resnet152(pretrained=True).cuda()
        model.fc = nn.Linear(model.fc.in_features, num_classes).cuda()
    elif model_type == 'densenet121':
        model = densenet121(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    elif model_type == 'densenet161':
        model = densenet161(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    elif model_type == 'densenet201':
        model = densenet201(pretrained=True).cuda()
        model.classifier = nn.Linear(model.classifier.in_features, num_classes).cuda()
    return model
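A hypothetical call of the helper above (assumes a CUDA device and that resnet50/resnet101/densenet121/... were imported from torchvision.models at module level):

# Sketch: build a 10-class ResNet-101 classifier on the GPU.
model = get_model(num_classes=10, model_type='resnet101')
print(model.fc)  # Linear(in_features=2048, out_features=10, bias=True)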
Example #7
    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        if(num==18):
            self.net = models.resnet18(pretrained=pretrained)
        elif(num==34):
            self.net = models.resnet34(pretrained=pretrained)
        elif(num==50):
            self.net = models.resnet50(pretrained=pretrained)
        elif(num==101):
            self.net = models.resnet101(pretrained=pretrained)
        elif(num==152):
            self.net = models.resnet152(pretrained=pretrained)
        self.N_slices = 5

        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4
Example #8
    def __init__(self, encoder_depth, pretrained=True):
        super().__init__()

        if encoder_depth == 34:
            self.encoder = models.resnet34(pretrained=pretrained)
            bottom_channel_nr = 512
        elif encoder_depth == 50:
            self.encoder = models.resnet50(pretrained=pretrained)
            bottom_channel_nr = 2048
        elif encoder_depth == 101:
            self.encoder = models.resnet101(pretrained=pretrained)
            bottom_channel_nr = 2048
        elif encoder_depth == 152:
            self.encoder = models.resnet152(pretrained=pretrained)
            bottom_channel_nr = 2048
        else:
            raise Exception('ResNet depth must be in {34, 50, 101, 152}')

        self.pool = nn.MaxPool2d(2, 2)

        self.relu = nn.ReLU(inplace=True)

        self.conv1 = nn.Sequential(self.encoder.conv1, self.encoder.bn1,
                                   self.encoder.relu, self.pool)
        self.conv2 = self.encoder.layer1
        self.conv3 = self.encoder.layer2
        self.conv4 = self.encoder.layer3
        self.conv5 = self.encoder.layer4

        self.conv6 = nn.Conv2d(bottom_channel_nr,
                               256,
                               kernel_size=3,
                               stride=2,
                               padding=1)
        self.conv7 = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)

        # Lateral layers
        self.latlayer1 = nn.Conv2d(bottom_channel_nr,
                                   256,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer2 = nn.Conv2d(bottom_channel_nr // 2,
                                   256,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)
        self.latlayer3 = nn.Conv2d(bottom_channel_nr // 4,
                                   256,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0)

        # Top-down layers
        self.toplayer1 = nn.Conv2d(256,
                                   256,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
        self.toplayer2 = nn.Conv2d(256,
                                   256,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
        self.toplayer3 = nn.Conv2d(256,
                                   256,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
Example #9
    def build(self):
        # Transform for input images.
        input_size = 299 if self.predictor_name == 'inception_v3' else 224
        self.transform = T.Compose([
            T.ToPILImage(),
            T.Resize((input_size, input_size)),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        if self.predictor_name == 'alexnet':
            model = M.alexnet(pretrained=True)
        elif self.predictor_name == 'vgg11':
            model = M.vgg11(pretrained=True)
        elif self.predictor_name == 'vgg13':
            model = M.vgg13(pretrained=True)
        elif self.predictor_name == 'vgg16':
            model = M.vgg16(pretrained=True)
        elif self.predictor_name == 'vgg19':
            model = M.vgg19(pretrained=True)
        elif self.predictor_name == 'vgg11_bn':
            model = M.vgg11_bn(pretrained=True)
        elif self.predictor_name == 'vgg13_bn':
            model = M.vgg13_bn(pretrained=True)
        elif self.predictor_name == 'vgg16_bn':
            model = M.vgg16_bn(pretrained=True)
        elif self.predictor_name == 'vgg19_bn':
            model = M.vgg19_bn(pretrained=True)
        elif self.predictor_name == 'googlenet':
            model = M.googlenet(pretrained=True, aux_logits=False)
        elif self.predictor_name == 'inception_v3':
            model = M.inception_v3(pretrained=True, aux_logits=False)
        elif self.predictor_name == 'resnet18':
            model = M.resnet18(pretrained=True)
        elif self.predictor_name == 'resnet34':
            model = M.resnet34(pretrained=True)
        elif self.predictor_name == 'resnet50':
            model = M.resnet50(pretrained=True)
        elif self.predictor_name == 'resnet101':
            model = M.resnet101(pretrained=True)
        elif self.predictor_name == 'resnet152':
            model = M.resnet152(pretrained=True)
        elif self.predictor_name == 'resnext50':
            model = M.resnext50_32x4d(pretrained=True)
        elif self.predictor_name == 'resnext101':
            model = M.resnext101_32x8d(pretrained=True)
        elif self.predictor_name == 'wideresnet50':
            model = M.wide_resnet50_2(pretrained=True)
        elif self.predictor_name == 'wideresnet101':
            model = M.wide_resnet101_2(pretrained=True)
        elif self.predictor_name == 'densenet121':
            model = M.densenet121(pretrained=True)
        elif self.predictor_name == 'densenet169':
            model = M.densenet169(pretrained=True)
        elif self.predictor_name == 'densenet201':
            model = M.densenet201(pretrained=True)
        elif self.predictor_name == 'densenet161':
            model = M.densenet161(pretrained=True)
        else:
            raise NotImplementedError(f'Unsupported architecture '
                                      f'`{self.predictor_name}`!')

        model.eval()

        if self.imagenet_logits:
            self.net = model
            self.feature_dim = (1000, )
            return

        if self.architecture_type == 'AlexNet':
            layers = list(model.features.children())
            if not self.spatial_feature:
                layers.append(nn.Flatten())
                self.feature_dim = (256 * 6 * 6, )
            else:
                self.feature_dim = (256, 6, 6)
        elif self.architecture_type == 'VGG':
            layers = list(model.features.children())
            if not self.spatial_feature:
                layers.append(nn.Flatten())
                self.feature_dim = (512 * 7 * 7, )
            else:
                self.feature_dim = (512, 7, 7)
        elif self.architecture_type == 'Inception':
            if self.predictor_name == 'googlenet':
                final_res = 7
                num_channels = 1024
                layers = list(model.children())[:-3]
            elif self.predictor_name == 'inception_v3':
                final_res = 8
                num_channels = 2048
                layers = list(model.children())[:-1]
                layers.insert(3, nn.MaxPool2d(kernel_size=3, stride=2))
                layers.insert(6, nn.MaxPool2d(kernel_size=3, stride=2))
            else:
                raise NotImplementedError(
                    f'Unsupported Inception architecture '
                    f'`{self.predictor_name}`!')
            if not self.spatial_feature:
                layers.append(nn.AdaptiveAvgPool2d((1, 1)))
                layers.append(nn.Flatten())
                self.feature_dim = (num_channels, )
            else:
                self.feature_dim = (num_channels, final_res, final_res)
        elif self.architecture_type == 'ResNet':
            if self.predictor_name in ['resnet18', 'resnet34']:
                num_channels = 512
            elif self.predictor_name in [
                    'resnet50', 'resnet101', 'resnet152', 'resnext50',
                    'resnext101', 'wideresnet50', 'wideresnet101'
            ]:
                num_channels = 2048
            else:
                raise NotImplementedError(f'Unsupported ResNet architecture '
                                          f'`{self.predictor_name}`!')
            if not self.spatial_feature:
                layers = list(model.children())[:-1]
                layers.append(nn.Flatten())
                self.feature_dim = (num_channels, )
            else:
                layers = list(model.children())[:-2]
                self.feature_dim = (num_channels, 7, 7)
        elif self.architecture_type == 'DenseNet':
            if self.predictor_name == 'densenet121':
                num_channels = 1024
            elif self.predictor_name == 'densenet169':
                num_channels = 1664
            elif self.predictor_name == 'densenet201':
                num_channels = 1920
            elif self.predictor_name == 'densenet161':
                num_channels = 2208
            else:
                raise NotImplementedError(f'Unsupported DenseNet architecture '
                                          f'`{self.predictor_name}`!')
            layers = list(model.features.children())
            if not self.spatial_feature:
                layers.append(nn.ReLU(inplace=True))
                layers.append(nn.AdaptiveAvgPool2d((1, 1)))
                layers.append(nn.Flatten())
                self.feature_dim = (num_channels, )
            else:
                self.feature_dim = (num_channels, 7, 7)
        else:
            raise NotImplementedError(f'Unsupported architecture type '
                                      f'`{self.architecture_type}`!')
        self.net = nn.Sequential(*layers)
Example #10
 def __init__(self):
     super(RGBNet, self).__init__()
     resnet101 = models.resnet101(pretrained=True)
     modules = list(resnet101.children())[:-1]
     self.model = nn.Sequential(*modules)
     self.model.cuda()
Example #11
def resnet101(args):
    return models.resnet101(pretrained=args.pretrained, num_classes=args.n_classes)
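Note that stock torchvision constructors typically fail when pretrained=True is combined with a non-default num_classes, because the ImageNet state dict no longer matches the reshaped fc layer. A common workaround, sketched below with an assumed function name, is to load the pretrained head and replace it afterwards:

# Hypothetical variant: load ImageNet weights first, then swap the classifier head.
def resnet101_custom_head(args):
    model = models.resnet101(pretrained=args.pretrained)
    model.fc = nn.Linear(model.fc.in_features, args.n_classes)
    return model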
Example #12
    def __init__(self, images):
        self.images = images

        # pre-trained model
        self.resnet = models.resnet101(pretrained=True)
Example #13
def main():
    data_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    data_dir = '/home/jsk/s/prcv/dataset/v2'
    image_datasets = datasets.ImageFolder(os.path.join(data_dir, 'rest_test'), data_transforms)
    testloader = torch.utils.data.DataLoader(image_datasets, batch_size=512, shuffle=False, num_workers=32)
              
    dataset_sizes = len(image_datasets)
    class_names = image_datasets.classes

    device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")

    model = {'resnet50': models.resnet50(pretrained=False),
             'resnet101': models.resnet101(pretrained=False),
             'resnet152': models.resnet152(pretrained=False)}

    param = {'101_64': 'resnet101_epoch20_ft_batch64.pkl',
             '101_32': 'resnet101_epoch20_ft_batch32.pkl',
             '101_16': 'resnet101_epoch26_ft_batch16.pkl',
             '101_8': 'resnet101_epoch26_ft_batch8.pkl',
             '101_4': 'resnet101_epoch26_ft_batch4.pkl',
             '101_2': 'resnet101_epoch26_ft_batch2.pkl',
             '50_128': 'resnet50_epoch30_ft_batch128.pkl',
             '50_64': 'resnet50_epoch30_ft_batch64.pkl',
             '50_32': 'resnet50_epoch30_ft_batch32.pkl',
             '50_16': 'resnet50_epoch30_ft_batch16.pkl',
             '50_8': 'resnet50_epoch30_ft_batch8.pkl',
             '50_4': 'resnet50_epoch30_ft_batch4.pkl',
             '50_2': 'resnet50_epoch30_ft_batch2.pkl',
             '50_1': 'resnet50_epoch30_ft_batch1.pkl'}

    model_test = model[model_dir]
    num_ftrs = model_test.fc.in_features
    model_test.fc = nn.Linear(num_ftrs, 205)
    print(model_test)
    print('test param %s of model %s' % (param[param_str],model_dir))

    param_dir = os.path.join(root,param[param_str])
    model_test.load_state_dict(torch.load(param_dir))
    model_test = model_test.to(device)
    model_test.eval()

    correct = 0
    correct_1 = 0
    correct_5 = 0
    top1 = 0
    top5 = 0
    total = 0
    batch = 0

    with torch.no_grad():
        for inputs, labels in testloader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model_test(inputs)

            total += labels.size(0)
	
            dcorrect_1, dcorrect_5 = accuracy(outputs,labels,topk=(1,5))
            correct_1 += dcorrect_1
            correct_5 += dcorrect_5
            top1 = correct_1.float()/total
            top5 = correct_5.float()/total

            batch += 1
	
            print('batch %d top1 accuracy: %.3f %% top5 accuracy: %.3f %%' % (batch,100*top1,100*top5))
    print('Accuracy of the %s on the %d test images: top1 %.3f %%  top5 %.3f %%' % (param[param_str],total,100*top1,100*top5))
Example #14
    def __init__(self,
                 train_fe=False,
                 feature_extraction_cnn='vgg19',
                 normalization=True,
                 last_layer='',
                 use_cuda=True):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization

        # multiple extracting layers
        last_layer = last_layer.split(',')

        if feature_extraction_cnn == 'vgg16':
            self.model = models.vgg16(pretrained=True)
            # keep feature extraction network up to indicated layer
            vgg_feature_layers = [
                'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
                'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
                'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
                'relu5_3', 'pool5'
            ]

            start_index = 0
            self.model_list = []
            for l in last_layer:
                if l == '':
                    l = 'pool4'
                layer_idx = vgg_feature_layers.index(l)
                assert layer_idx >= start_index, 'layer order wrong!'
                model = nn.Sequential(*list(self.model.features.children())
                                      [start_index:layer_idx + 1])
                self.model_list.append(model)
                start_index = layer_idx + 1

        if feature_extraction_cnn == 'vgg19':
            self.model = models.vgg19(pretrained=True)
            # keep feature extraction network up to indicated layer
            vgg_feature_layers = [
                'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
                'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
                'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4',
                'relu3_4', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2',
                'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1',
                'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
                'conv5_4', 'relu5_4', 'pool5'
            ]

            # vgg_output_dim = [64, 64, 64, 64, 64,
            #                   128, 128, 128, 128, 128,
            #                   256, 256, 256, 256, 256, 256, 256, 256, 256,
            #                   512, 512, 512, 512, 512, 512, 512, 512, 512,
            #                   512, 512, 512, 512, 512, 512, 512, 512, 512]

            start_index = 0
            self.model_list = []
            # self.out_dim = 0
            for l in last_layer:
                if l == '':
                    l = 'relu5_4'
                layer_idx = vgg_feature_layers.index(l)
                assert layer_idx >= start_index, 'layer order wrong!'
                # self.out_dim += vgg_output_dim[layer_idx]
                model = nn.Sequential(*list(self.model.features.children())
                                      [start_index:layer_idx + 1])
                self.model_list.append(model)
                start_index = layer_idx + 1

        if feature_extraction_cnn in ['resnet18', 'resnet101']:

            if feature_extraction_cnn == 'resnet18':
                self.model = models.resnet18(pretrained=True)
            else:
                self.model = models.resnet101(pretrained=True)

            resnet_feature_layers = [
                'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2',
                'layer3', 'layer4'
            ]

            resnet_module_list = [
                self.model.conv1, self.model.bn1, self.model.relu,
                self.model.maxpool, self.model.layer1, self.model.layer2,
                self.model.layer3, self.model.layer4
            ]

            start_index = 0
            self.model_list = []
            for l in last_layer:
                if l == '':
                    l = 'layer3'
                layer_idx = resnet_feature_layers.index(l)
                assert layer_idx >= start_index, 'layer order wrong!'
                model = nn.Sequential(
                    *resnet_module_list[start_index:layer_idx + 1])
                self.model_list.append(model)
                start_index = layer_idx + 1

        if not train_fe:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False
        # move to GPU
        if use_cuda:
            self.model_list = [model.cuda() for model in self.model_list]
Example #15
def main():
    parser = argparse.ArgumentParser(
        description='Compute TCGA features from SimCLR embedder')
    parser.add_argument('--num_classes',
                        default=2,
                        type=int,
                        help='Number of output classes')
    parser.add_argument('--num_feats',
                        default=512,
                        type=int,
                        help='Feature size')
    parser.add_argument('--batch_size',
                        default=128,
                        type=int,
                        help='Batch size of dataloader')
    parser.add_argument('--num_workers',
                        default=0,
                        type=int,
                        help='Number of threads for dataloader')
    parser.add_argument('--dataset',
                        default='wsi-tcga-lung',
                        type=str,
                        help='Name of dataset')
    parser.add_argument('--backbone',
                        default='resnet18',
                        type=str,
                        help='Embedder backbone')
    parser.add_argument('--magnification',
                        default='20x',
                        type=str,
                        help='Magnification to compute features')
    parser.add_argument('--weights',
                        default=None,
                        type=str,
                        help='Folder of the pretrained weights, simclr/runs/*')
    args = parser.parse_args()

    if args.backbone == 'resnet18':
        resnet = models.resnet18(pretrained=False,
                                 norm_layer=nn.InstanceNorm2d)
        num_feats = 512
    if args.backbone == 'resnet34':
        resnet = models.resnet34(pretrained=False,
                                 norm_layer=nn.InstanceNorm2d)
        num_feats = 512
    if args.backbone == 'resnet50':
        resnet = models.resnet50(pretrained=False,
                                 norm_layer=nn.InstanceNorm2d)
        num_feats = 2048
    if args.backbone == 'resnet101':
        resnet = models.resnet101(pretrained=False,
                                  norm_layer=nn.InstanceNorm2d)
        num_feats = 2048
    for param in resnet.parameters():
        param.requires_grad = False
    resnet.fc = nn.Identity()
    i_classifier = mil.IClassifier(resnet,
                                   num_feats,
                                   output_class=args.num_classes).cuda()

    if args.weights is not None:
        weight_path = os.path.join('simclr', 'runs', args.weights,
                                   'checkpoints', 'model.pth')
    else:
        weight_path = glob.glob('simclr/runs/*/checkpoints/*.pth')[-1]
    state_dict_weights = torch.load(weight_path)

    try:
        state_dict_weights.pop('module.l1.weight')
        state_dict_weights.pop('module.l1.bias')
        state_dict_weights.pop('module.l2.weight')
        state_dict_weights.pop('module.l2.bias')
    except KeyError:
        state_dict_weights.pop('l1.weight')
        state_dict_weights.pop('l1.bias')
        state_dict_weights.pop('l2.weight')
        state_dict_weights.pop('l2.bias')
    state_dict_init = i_classifier.state_dict()
    new_state_dict = OrderedDict()
    for (k, v), (k_0, v_0) in zip(state_dict_weights.items(),
                                  state_dict_init.items()):
        name = k_0
        new_state_dict[name] = v
    i_classifier.load_state_dict(new_state_dict, strict=False)

    if args.dataset == 'wsi-tcga-lung':
        bags_path = os.path.join('WSI', 'TCGA-lung', 'pyramid', '*', '*')
    if args.dataset == 'wsi-tcga-lung-single':
        bags_path = os.path.join('WSI', 'TCGA-lung', 'single', '*', '*')
    feats_path = os.path.join('datasets', 'wsi-tcga-lung')
    os.makedirs(feats_path, exist_ok=True)
    bags_list = glob.glob(bags_path)
    compute_feats(args, bags_list, i_classifier, feats_path)
Example #16
def process_finetuning(params):
    num_classes = len(wrgbd51.class_names)
    # uncomment saving codes after param search
    save_dir = params.dataset_path + params.features_root + RunSteps.FINE_TUNING + '/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    best_model_file = save_dir + params.net_model + '_' + params.data_type + '_split_' + str(params.split_no) + \
                      '_best_checkpoint.pth'

    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cuda")

    if params.net_model == Models.DenseNet121:
        model_ft = models.densenet121(pretrained=True)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
    elif params.net_model in (Models.ResNet50, Models.ResNet101):
        if params.net_model == Models.ResNet50:
            model_ft = models.resnet50(pretrained=True)
        else:
            model_ft = models.resnet101(pretrained=True)

        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
    else:  # params.net_model is AlexNet or VGG16
        if params.net_model == Models.AlexNet:
            model_ft = models.alexnet(pretrained=True)
        else:
            model_ft = models.vgg16_bn(pretrained=True)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    model_ft = model_ft.to(device)

    # following two lines are added for the second stage of the two-stage fine-tuning
    # state_dict = torch.load(best_model_file)
    # model_ft.load_state_dict(state_dict)

    # Parameters of newly constructed modules have "requires_grad=True" by default
    set_parameters_requires_grad(model_ft,
                                 params.net_model,
                                 train_only_one_layer=False)

    data_form = get_data_transform(params.data_type)

    training_set = WashingtonDataset(params,
                                     phase='train',
                                     loader=custom_loader,
                                     transform=data_form)
    train_loader = torch.utils.data.DataLoader(training_set,
                                               params.batch_size,
                                               shuffle=True)

    val_set = WashingtonDataset(params,
                                phase='test',
                                loader=custom_loader,
                                transform=data_form)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             params.batch_size,
                                             shuffle=False)
    data_loaders = {'train': train_loader, 'val': val_loader}

    # first stage of finetuning: finetune the last layer, freeze the rest of the network
    model_ft = fine_tuning(params, model_ft, data_loaders, device, stage=1)

    # report_cpu_stats()
    # report_gpu_memory()
    torch.save(model_ft.state_dict(),
               best_model_file)  # uncomment this line after param search
Example #17
def train(i, train_acc, train_loss):
    data_transform = transforms.Compose([
        #transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #print(DATASET_ROOT)
    all_data_set = IMAGE_Dataset(Path(DATASET_ROOT1), data_transform)

    #print('set:',len(train_set))
    indices = list(range(len(all_data_set)))
    #print('old',indices)
    np.random.seed(1)
    np.random.shuffle(indices)
    #print('new',indices)
    split = math.ceil(len(all_data_set) *
                      0.1)  # extract 10% dataset as test-set
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    test_sampler = SubsetRandomSampler(valid_idx)
    #print('test')
    #print(test_sampler)
    #train_set, test_set = torch.utils.data.random_split(train_set, [400, 115])
    print('train_set:', len(train_sampler), 'test_set:', len(test_sampler))

    train_data_loader = DataLoader(dataset=all_data_set,
                                   batch_size=BATCH_SIZE,
                                   shuffle=False,
                                   num_workers=0,
                                   sampler=train_sampler)

    test_data_loader = DataLoader(dataset=all_data_set,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False,
                                  num_workers=0,
                                  sampler=test_sampler)

    #print(train_set.num_classes)

    if i == 1:
        model = models.resnet101(pretrained=True)
        #fc_features=model.fc.in_features
        #model.fc=nn.Linear(fc_features,5)
        f = lambda x: math.ceil(x / 32 - 7 + 1)
        my_output_module = nn.Sequential(
            nn.Linear(f(1024) * f(1024) * 2048, REG_OUTPUT), nn.Softmax(dim=1))

        model.fc = my_output_module
        # model.fc=nn.Linear(f(256)*f(256)*2048, REG_OUTPUT)
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=DEVICE_IDS)
    if i != 1:
        model = torch.load(PATH_TO_WEIGHTS)
    '''if i==1:
        model=VGG16(num_classes=all_data_set.num_classes)
    elif i!=1:
        model=torch.load(PATH_TO_WEIGHTS)'''
    # model = model.cuda(CUDA_DEVICES)
    model.train()  #train

    best_model_params = copy.deepcopy(model.state_dict())  # copy parameters
    best_acc = 0.0
    num_epochs = EPOCH_SIZE
    criterion = nn.L1Loss()
    criterion2 = nn.MSELoss()
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=0.001,
                                momentum=0.9)

    train_loss = []
    train_loss2 = []
    best_loss = math.inf

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_loss2 = 0.0
        # training_corrects = 0

        for i, (inputs, labels) in enumerate(train_data_loader):
            inputs = inputs.cuda()  #CUDA_DEVICES)
            labels = labels.cuda()  #CUDA_DEVICES)

            optimizer.zero_grad()

            outputs = model(inputs)
            # _ , preds = torch.max(outputs.data, 1)
            outputs = outputs.squeeze(1)
            predictions = outputs.data.cpu().numpy()

            predictions = np.array([[round(x, 2) for x in row]
                                    for row in predictions])
            loss = criterion(outputs, labels)
            loss2 = criterion2(outputs * 100, labels * 100)
            loss.backward()
            optimizer.step()
            # sm = F.softmax(outputs,dim=1)
            # print("======== Softmax ========")
            # print(sm.data)
            # print("=========================")
            #print("preds:"+str(preds))

            if i * BATCH_SIZE % 1000 == 0:

                print(
                    "\n\n||||||||||||||||||||| BATCH-%d |||||||||||||||||||||\n"
                    % i)
                print("\n=================== Labels =====================\n")
                print(labels)
                print("\n================= Predictions ==================\n")
                print(predictions)
                print("\n================= Batch Loss ===================\n")
                print(f"Training: {loss.data:.2f}")
                print(f"MSELoss : {loss2.data:.2f}\n")
                print("\n================= Epoch Loss ===================\n")
                print(f'Training:', train_loss)
                print(f'MSEloss :', train_loss2)

            progress = i * BATCH_SIZE / len(train_sampler)
            print(
                f"[Training Progress]: {progress:.4f}% [Batch Loss]: {loss2.data:.2f}",
                end='\r')

            training_loss += loss.item() * inputs.size(0)
            training_loss2 += loss2.item() * inputs.size(0)

        # Calculate Loss and MSELoss in the current epoch
        training_loss = training_loss / len(train_sampler)
        training_loss2 = training_loss2 / len(train_sampler)

        # train_acc.append(training_acc)        #save each 10 epochs accuracy
        train_loss.append(int(training_loss))
        train_loss2.append(int(training_loss2))

        print(
            "########################\nFinish Epoch\n#########################\n"
        )

        if training_loss < best_loss:

            best_loss = training_loss
            best_model_params = copy.deepcopy(model.state_dict())
            torch.save(model, RB_PATH_TO_WEIGHTS)  # save the entire model

        print("Best Loss: %.2f" % best_loss)

    model.load_state_dict(
        best_model_params)  # load the best parameters back into the model
    torch.save(model, PATH_TO_WEIGHTS)  # save the entire model

    return ([], train_loss, train_loss2, test_data_loader)
Example #18
    def __init__(self, seg2d_path, img_size):
        super(ImageGen3DNet, self).__init__()

        self.seg2d = Seg2DNet(model=models.resnet101(False), num_classes=12)

        chpo = torch.load(seg2d_path)
        self.seg2d.load_state_dict(chpo['state_dict'], strict=False)
        print "=> seg2d loaded checkpoint '{}'".format(seg2d_path)

        self.seq1 = nn.Sequential(nn.Conv3d(64, 64, 3, padding=1, bias=False),
                                  nn.BatchNorm3d(64), nn.ReLU(inplace=True),
                                  nn.Conv3d(64, 64, 3, padding=1, bias=False),
                                  nn.BatchNorm3d(64))
        self.seq2 = nn.Sequential(nn.Conv3d(64, 64, 3, padding=1, bias=False),
                                  nn.BatchNorm3d(64), nn.ReLU(inplace=True),
                                  nn.Conv3d(64, 64, 3, padding=1, bias=False),
                                  nn.BatchNorm3d(64))
        self.relu = nn.ReLU(inplace=True)
        self.ASPP3D1 = ASPP3D(64, 64, [1, 2, 3])
        self.ASPP3D2 = ASPP3D(64, 64, [1, 2, 3])
        self.ASPP3Dout = nn.Sequential(
            nn.Conv3d(256, 128, 1, bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 128, 1, bias=False),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.Conv3d(128, 12, 1),  # For LateFusion
            nn.Conv3d(12, 12, 3, padding=1)  # For LateFusion
        )

        self.img_required_size = (640, 480)
        self.img_size = img_size
        if self.img_required_size != self.img_size:
            x = np.array(range(self.img_required_size[0]), dtype=np.float32)
            y = np.array(range(self.img_required_size[1]), dtype=np.float32)
            scale = 1.0 * self.img_size[0] / self.img_required_size[0]
            x = x * scale + 0.5
            y = y * scale + 0.5
            x = x.astype(np.int64)
            y = y.astype(np.int64)
            if x[self.img_required_size[0] - 1] >= self.img_size[0]:
                x[self.img_required_size[0] - 1] = self.img_size[0] - 1
            if y[self.img_required_size[1] - 1] >= self.img_size[1]:
                y[self.img_required_size[1] - 1] = self.img_size[1] - 1
            xx = np.ones(
                (self.img_required_size[1], self.img_required_size[0]),
                dtype=np.int64)
            yy = np.ones(
                (self.img_required_size[1], self.img_required_size[0]),
                dtype=np.int64)
            xx[:] = x
            yy[:] = y.reshape(
                (self.img_required_size[1], 1)) * self.img_size[0]
            image_mapping1 = (xx + yy).reshape(-1)
        else:
            image_mapping1 = np.array(range(self.img_required_size[0] *
                                            self.img_required_size[1]),
                                      dtype=np.int64)
        self.register_buffer(
            'image_mapping',
            torch.autograd.Variable(torch.LongTensor(image_mapping1),
                                    requires_grad=False))

        self.dim_inc_dim = 64
Example #19
 def test_resnet101(self):
     process_model(models.resnet101(self.pretrained), self.image, _C_tests.forward_resnet101, 'Resnet101')
Example #20
    def training(self):
        self.tr_loss_track, self.val_loss_track, self.tr_acc_track, self.val_acc_track, self.val_acc_history = [], [], [], [], []

        print('training', self.model_type, self.optim_type, self.learning_rate)
        # Defining pretrained model
        if self.model_type == "vgg19":
            model = models.vgg19(pretrained=True)
            model.classifier[6] = nn.Linear(4096, self.class_labels)
        elif self.model_type == "resnet50":
            model = models.resnet50(pretrained=True)
            num_features = model.fc.in_features
            model.fc = nn.Linear(num_features, self.class_labels)
        elif self.model_type == "resnet101":
            model = models.resnet101(pretrained=True)
            num_features = model.fc.in_features
            model.fc = nn.Linear(num_features, self.class_labels)
        elif self.model_type == "resnet151":
            model = models.resnet101(pretrained=True)
            num_features = model.fc.in_features
            model.fc = nn.Linear(num_features, self.class_labels)
        elif self.model_type == "googlenet":
            model = models.googlenet(pretrained=True)
            model.classifier = nn.Linear(1000, self.class_labels)
        elif self.model_type == "densenet121":
            model = models.densenet121(pretrained=True)
            num_features = model.classifier.in_features
            model.classifier = nn.Linear(num_features, self.class_labels)
        # #Without fine tuning
        elif self.model_type == "un_vgg19":
            # print("unpretrained1")
            model = models.vgg19(pretrained=False)
            model.classifier[6] = nn.Linear(4096, self.class_labels)
        # set pretrained =False for models
        elif self.model_type == "un_resnet50":
            # print("unpretrained2")
            model = models.resnet50(pretrained=False)
            num_features = model.fc.in_features
            model.fc = nn.Linear(num_features, self.class_labels)

        model = model.to(self.device)
        criterion = nn.CrossEntropyLoss()

        if self.optim_type == "adam":
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=self.learning_rate)
        elif self.optim_type == "sgd":
            optimizer = torch.optim.SGD(model.parameters(),
                                        lr=self.learning_rate,
                                        momentum=0.9)

        # training
        start_time = time.time()
        best_loss = float('inf')
        best_model = None
        best_acc = 0.0
        # best_model_wts = copy.deepcopy(model.state_dict())

        for epoch in range(1, self.num_epochs + 1):
            print('epoch {}/{}'.format(epoch, self.num_epochs))
            for t in ['train', 'val']:
                num_correct = 0.0
                num_samples = 0
                r_loss = 0.0
                running_corrects = 0

                if t == 'train':
                    # training mode
                    model.train()
                else:
                    # evaluate model
                    model.eval()

                count = 0
                for data in self.dataloaders[t]:
                    count += 1
                    # data has three types files, labels and filename
                    files, labels, filename = data

                    files = Variable(files.to(self.device))  # to gpu or cpu
                    labels = Variable(labels.to(self.device))

                    optimizer.zero_grad()  # clear old gradients from the last step

                    with torch.set_grad_enabled(t == 'train'):
                        pred = model(files)
                        # loss computation
                        loss = criterion(pred, labels)

                        _, prediction = torch.max(pred, 1)

                        # backprop gradients at training time
                        if t == 'train':
                            loss.backward()
                            optimizer.step()

                        # print(t +' iteration {}:loss {}  '.format(count,r_loss))
                    # statistics
                    r_loss += loss.item() * files.size(0)
                    print(t + ' iteration {}:loss {}  '.format(count, r_loss))
                    running_corrects += torch.sum(prediction == labels.data)
                epoch_loss = r_loss / len(self.dataloaders[t].dataset)
                epoch_acc = running_corrects.double() / len(
                    self.dataloaders[t].dataset)

                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    t, epoch_loss, epoch_acc))

                # print(t +' epoch {}:loss {}  '.format(epoch,r_loss))

                # deep copy the model
                # print('epoch_acc',epoch_acc,'best_acc',best_acc)
                if t == 'val' and epoch_acc > best_acc:
                    print('inside check point if')
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                    self.checkpoint(best_model_wts, best_loss, epoch,
                                    self.learning_rate)
                if t == 'val':
                    self.val_acc_history.append(epoch_acc.item())
                    self.val_loss_track.append(epoch_loss)

                if t == 'train':
                    self.tr_loss_track.append(epoch_loss)
                    self.tr_acc_track.append(epoch_acc.item())

        time_elapsed = time.time() - start_time
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))

        # load best model weights
        # model.load_state_dict(best_model_wts)
        # updating best model in checkpoint

        self.plot_losses_both(self.tr_loss_track, self.val_loss_track)
        self.plot_loss_Val(self.val_loss_track)
        self.plot_loss_Accu(self.val_acc_history)
Example #21
        if self.transforms is not None:
            image = self.transforms(image)

        return image, label


train_dataset = LandmarkDataset(train_transforms)
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=args.num_workers)

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = resnet101(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 1049)
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)


class AverageMeter:
    ''' Computes and stores the average and current value '''
    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.val = 0.0
        self.avg = 0.0
Example #22
def get_backbone(name, pretrained=False):
    """ Loading backbone, defining names for skip-connections and encoder output. """

    # TODO: More backbones

    # loading backbone model
    if name == 'resnet18':
        backbone = models.resnet18(pretrained=pretrained)
    elif name == 'resnet34':
        backbone = models.resnet34(pretrained=pretrained)
    elif name == 'resnet50':
        backbone = models.resnet50(pretrained=pretrained)
    elif name == 'resnet101':
        backbone = models.resnet101(pretrained=pretrained)
    elif name == 'resnet152':
        backbone = models.resnet152(pretrained=pretrained)
    elif name == 'vgg16':
        backbone = models.vgg16_bn(pretrained=pretrained).features
    elif name == 'vgg19':
        backbone = models.vgg19_bn(pretrained=pretrained).features
    # elif name == 'inception_v3':
    #     backbone = models.inception_v3(pretrained=pretrained, aux_logits=False)
    elif name == 'densenet121':
        backbone = models.densenet121(pretrained=pretrained).features
    elif name == 'densenet161':
        backbone = models.densenet161(pretrained=pretrained).features
    elif name == 'densenet169':
        backbone = models.densenet169(pretrained=pretrained).features
    elif name == 'densenet201':
        backbone = models.densenet201(pretrained=pretrained).features
    elif name == 'unet_encoder':
        from unet_backbone import UnetEncoder
        backbone = UnetEncoder(3)
    else:
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    # specifying skip feature and output names
    if name.startswith('resnet'):
        feature_names = [None, 'relu', 'layer1', 'layer2', 'layer3']
        backbone_output = 'layer4'
    elif name == 'vgg16':
        # TODO: consider using a 'bridge' for VGG models, there is just a MaxPool between last skip and backbone output
        feature_names = ['5', '12', '22', '32', '42']
        backbone_output = '43'
    elif name == 'vgg19':
        feature_names = ['5', '12', '25', '38', '51']
        backbone_output = '52'
    # elif name == 'inception_v3':
    #     feature_names = [None, 'Mixed_5d', 'Mixed_6e']
    #     backbone_output = 'Mixed_7c'
    elif name.startswith('densenet'):
        feature_names = [
            None, 'relu0', 'denseblock1', 'denseblock2', 'denseblock3'
        ]
        backbone_output = 'denseblock4'
    elif name == 'unet_encoder':
        feature_names = ['module1', 'module2', 'module3', 'module4']
        backbone_output = 'module5'
    else:
        raise NotImplementedError(
            '{} backbone model is not implemented so far.'.format(name))

    return backbone, feature_names, backbone_output
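A hypothetical usage of get_backbone above, e.g. when wiring up the encoder of a U-Net style model:

# Sketch: fetch a ResNet-101 encoder plus the names used for skip connections.
backbone, skip_names, out_name = get_backbone('resnet101', pretrained=True)
# skip_names == [None, 'relu', 'layer1', 'layer2', 'layer3'], out_name == 'layer4'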
Example #23
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


# ===============================================================================================================================

resnet_inits = {
    10: lambda flag: vmodels.ResNet(BasicBlock, [1, 1, 1, 1]),
    14: lambda flag: vmodels.ResNet(BasicBlock, [1, 1, 2, 2]),
    18: lambda flag: vmodels.resnet18(pretrained=flag),
    34: lambda flag: vmodels.resnet34(pretrained=flag),
    50: lambda flag: vmodels.resnet50(pretrained=flag),
    101: lambda flag: vmodels.resnet101(pretrained=flag),
    152: lambda flag: vmodels.resnet152(pretrained=flag),
}

possible_resnets = resnet_inits.keys()
resnet_outsize = {
    10: 512,
    14: 512,
    18: 512,
    34: 512,
    50: 2048,
    101: 2048,
    152: 2048,
}
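A hypothetical helper built on the two tables above (the function name is an assumption):

# Sketch: construct a ResNet trunk of a given depth and report its output feature width.
def build_resnet_trunk(depth, pretrained=False):
    assert depth in possible_resnets, 'unsupported ResNet depth'
    net = resnet_inits[depth](pretrained)
    return net, resnet_outsize[depth]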

Example #24
    def __init__(self,
                 layers=50,
                 bins=(1, 2, 3, 6),
                 dropout=0.1,
                 classes=2,
                 zoom_factor=8,
                 use_ppm=True,
                 naive_ppm=False,
                 criterion=nn.CrossEntropyLoss(ignore_index=0),
                 pretrained=True):
        super(PSPNet, self).__init__()
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.criterion = criterion

        self.layers = layers
        if layers == 18:
            resnet = models.resnet18(pretrained=pretrained)
        elif layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        else:
            resnet = models.resnet101(pretrained=pretrained)

        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                    resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        # fea_dim = resnet.layer4[-1].conv3.out_channels
        fea_dim = 512
        if layers == 18:
            for n, m in self.layer3[0].named_modules():
                if 'conv1' in n:
                    m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
                if 'downsample.0' in n:
                    m.stride = (1, 1)
            for n, m in self.layer4[0].named_modules():
                if 'conv1' in n:
                    m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
                if 'downsample.0' in n:
                    m.stride = (1, 1)

        else:
            fea_dim = 2048
            for n, m in self.layer3.named_modules():
                if 'conv2' in n:
                    m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
                elif 'downsample.0' in n:
                    m.stride = (1, 1)
            for n, m in self.layer4.named_modules():
                if 'conv2' in n:
                    m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
                elif 'downsample.0' in n:
                    m.stride = (1, 1)

        if self.training:
            self.aux_head = nn.Sequential(
                nn.Conv2d(fea_dim, 256, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(256), nn.LeakyReLU(inplace=True),
                nn.Dropout2d(p=dropout), nn.Conv2d(256, classes,
                                                   kernel_size=1))

        if use_ppm:
            if naive_ppm:
                self.ppm = NaivePyramidPoolingModule(fea_dim, int(fea_dim))
            else:
                self.ppm = PyramidPoolingModule(fea_dim,
                                                int(fea_dim / len(bins)), bins)
            fea_dim *= 2
        self.cls_head = nn.Sequential(
            nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(512), nn.LeakyReLU(inplace=True),
            nn.Dropout2d(p=dropout), nn.Conv2d(512, classes, kernel_size=1))
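A standalone sketch of the dilation edits above for layers=101: converting layer3/layer4 to stride 1 with dilation keeps the backbone at output stride 8, so a 473x473 crop yields 60x60 features instead of 15x15.

# Sketch: apply the same stride/dilation surgery to a bare torchvision ResNet-101
# and check the resulting feature resolution.
import torch
from torchvision import models

resnet = models.resnet101(pretrained=False)
for stage, dil in ((resnet.layer3, 2), (resnet.layer4, 4)):
    for n, m in stage.named_modules():
        if 'conv2' in n:
            m.dilation, m.padding, m.stride = (dil, dil), (dil, dil), (1, 1)
        elif 'downsample.0' in n:
            m.stride = (1, 1)

x = torch.randn(1, 3, 473, 473)
x = resnet.maxpool(resnet.relu(resnet.bn1(resnet.conv1(x))))
x = resnet.layer4(resnet.layer3(resnet.layer2(resnet.layer1(x))))
print(x.shape)  # torch.Size([1, 2048, 60, 60])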
Example #25
def extract_features(split):
    img_dir_base = './'

    transform = transforms.Compose([
        transforms.Resize([224, 224]),
        # transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    # load model
    model = models.resnet101(
        pretrained=True
    )  # NOTE: VGG is used in DiDeMo (not sure which version though)
    model = model.cuda()
    model.eval()

    def get_feature_hook(self, input, output):
        if output.data.size(0) == 1:
            out = output.data.cpu().numpy().reshape(output.data.size(1))
        else:
            out = output.data.cpu().numpy().reshape(output.data.size(0),
                                                    output.data.size(1))
        feats.append(out)

    feat_layer = model._modules.get('avgpool')
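    # Hook the global average-pooling layer: every forward pass appends the
    # pooled 2048-d feature vector to feats.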
    feat_layer.register_forward_hook(get_feature_hook)
    feat_file = '/home/grapefruit/vqa/resnet101_avgpool_{}.h5'.format(split)
    featsh5 = h5py.File(feat_file, 'w')

    print_every = 500
    cache_every = 2000
    start = time.time()
    img_paths = sorted(glob('v7w_*.jpg'))
    all_feats = []
    for i, path in enumerate(img_paths):
        if i and i % print_every == 0:
            avg_time = (time.time() - start) / print_every
            print('Processing {:d}/{:d} (avg: {:f}s)'.format(
                i, len(img_paths), avg_time))
            sys.stdout.flush()
            start = time.time()

        feats = []
        img = Image.open(open(path, 'rb'))
        img = transform(img)
        img_var = Variable(torch.unsqueeze(img, 0)).cuda()
        model(img_var)  # the forward hook collects the feature into feats
        all_feats.append(feats[0])

        if i and i % cache_every == 0:
            with open('/home/grapefruit/vqa/all_feats_tmp.pickle',
                      'wb') as handle:
                pickle.dump(all_feats, handle)
        # feats = np.concatenate(feats, axis=0)
        # print('type feats:', type(feats))
        # print('feats size:', feats.shape)
        # feats_arr = np.asarray(feats).squeeze()
        # print('feats_arr type:', type(feats_arr))
        featsh5.create_dataset(path[path.find('_') + 1:path.find('.')],
                               data=feats[0])
    featsh5.close()
Example No. 26
    def __init__(self, roi_size, backbone):
        super().__init__()
        self.roi_size = roi_size

        if backbone == 'resnet18':
            resnet = models.resnet18(pretrained=True)
            self.pmps1_conv_so = nn.Conv2d(512,
                                           128,
                                           kernel_size=3,
                                           stride=1,
                                           padding=1)
            self.pmps1_conv_p = nn.Conv2d(512,
                                          128,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1)
        elif backbone == 'resnet101':
            resnet = models.resnet101(pretrained=True)
            self.pmps1_conv_so = nn.Conv2d(2048,
                                           128,
                                           kernel_size=3,
                                           stride=1,
                                           padding=1)
            self.pmps1_conv_p = nn.Conv2d(2048,
                                          128,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1)

        children = list(resnet.children())
        self.shared_conv_layers = nn.Sequential(*children[:7])
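        # children[:7] keeps conv1 .. layer3 as the shared trunk; layer4
        # (children[7]) is duplicated below so the subject/object branch and
        # the predicate branch each get their own copy.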

        self.pre_pmps1_so = children[7]
        self.pre_pmps1_p = copy.deepcopy(self.pre_pmps1_so)

        self.pmps1_gather_batchnorm_so = nn.BatchNorm2d(128)
        self.pmps1_gather_batchnorm_p = nn.BatchNorm2d(128)
        self.pmps1_conv_so2p = nn.Conv2d(128,
                                         128,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1)
        self.pmps1_conv_p2s = nn.Conv2d(128,
                                        128,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
        self.pmps1_conv_p2o = nn.Conv2d(128,
                                        128,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)
        self.pmps1_broadcast_batchnorm_p = nn.BatchNorm2d(128)
        self.pmps1_broadcast_batchnorm_s = nn.BatchNorm2d(128)
        self.pmps1_broadcast_batchnorm_o = nn.BatchNorm2d(128)

        self.pmps2_gather_linear_so = nn.Linear(256 * roi_size * roi_size,
                                                32 * roi_size * roi_size)
        self.pmps2_gather_linear_p = nn.Linear(256 * roi_size * roi_size,
                                               32 * roi_size * roi_size)
        self.pmps2_linear_s2p = nn.Linear(32 * roi_size * roi_size,
                                          32 * roi_size * roi_size)
        self.pmps2_linear_o2p = nn.Linear(32 * roi_size * roi_size,
                                          32 * roi_size * roi_size)
        #self.pmps2_broadcast_linear_so = nn.Linear(64 * roi_size * roi_size, 8 * roi_size * roi_size)
        self.pmps2_broadcast_linear_p = nn.Linear(32 * roi_size * roi_size,
                                                  4 * roi_size * roi_size)
        self.pmps2_gather_batchnorm_s = nn.BatchNorm1d(32 * roi_size *
                                                       roi_size)
        self.pmps2_gather_batchnorm_o = nn.BatchNorm1d(32 * roi_size *
                                                       roi_size)
        self.pmps2_gather_batchnorm_p = nn.BatchNorm1d(32 * roi_size *
                                                       roi_size)
        self.pmps2_broadcast_batchnorm_p = nn.BatchNorm1d(4 * roi_size *
                                                          roi_size)

        self.fc = nn.Linear(4 * roi_size * roi_size, 9)
Example No. 27
def resnet101_COVNet_trip_max(num_classes=2, pretrained=True):
    model1 = models.resnet101(pretrained)
    model2 = models.resnet101(pretrained)
    model3 = models.resnet101(pretrained)

    return COVNet_trip_max(model1, model2, model3, num_classes)
Example No. 28
File: test.py  Project: guker/PIPNet
save_dir = os.path.join('./snapshots', cfg.data_name, cfg.experiment_name)
if not os.path.exists(save_dir):
    os.mkdir(save_dir)

if cfg.det_head == 'pip':
    meanface_indices, reverse_index1, reverse_index2, max_len = get_meanface(os.path.join('data', cfg.data_name, 'meanface.txt'), cfg.num_nb)
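    # get_meanface returns, for each landmark, its num_nb nearest neighbours on
    # the mean face; the reverse indices and max_len are used later to merge the
    # neighbour predictions.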
    
if cfg.det_head == 'pip':
    if cfg.backbone == 'resnet18':
        resnet18 = models.resnet18(pretrained=cfg.pretrained)
        net = Pip_resnet18(resnet18, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
    elif cfg.backbone == 'resnet50':
        resnet50 = models.resnet50(pretrained=cfg.pretrained)
        net = Pip_resnet50(resnet50, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
    elif cfg.backbone == 'resnet101':
        resnet101 = models.resnet101(pretrained=cfg.pretrained)
        net = Pip_resnet101(resnet101, cfg.num_nb, num_lms=cfg.num_lms, input_size=cfg.input_size, net_stride=cfg.net_stride)
    else:
        print('No such backbone!')
        exit(0)
else:
    print('No such head:', cfg.det_head)
    exit(0)

if cfg.use_gpu:
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
net = net.to(device)

weight_file = os.path.join(save_dir, 'epoch%d.pth' % (cfg.num_epochs-1))
Example No. 29
    def __init__(self,
                 num_classes=1,
                 num_filters=32,
                 pretrained=False,
                 is_deconv=True):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder uses an ImageNet pre-trained resnet101
        :is_deconv:
            False: bilinear interpolation is used in decoder
            True: deconvolution is used in decoder
        """
        super().__init__()
        self.num_classes = num_classes

        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.resnet101(pretrained=pretrained)

        self.relu = nn.ReLU(inplace=True)

        self.conv1 = nn.Sequential(self.encoder.conv1, self.encoder.bn1,
                                   self.encoder.relu, self.pool)

        self.conv2 = self.encoder.layer1

        self.conv3 = self.encoder.layer2

        self.conv4 = self.encoder.layer3

        self.conv5 = self.encoder.layer4

        bottom_channel_nr = 2048
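        # resnet101's layer4 outputs 2048 channels, which feed the center block
        # and set the skip-connection widths of the decoder blocks below.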

        self.center = DecoderBlockV2(bottom_channel_nr, num_filters * 8 * 2,
                                     num_filters * 8, is_deconv)
        self.dec5 = DecoderBlockV2(bottom_channel_nr + num_filters * 8,
                                   num_filters * 8 * 2, num_filters * 8,
                                   is_deconv)
        self.dec4 = DecoderBlockV2(bottom_channel_nr // 2 + num_filters * 8,
                                   num_filters * 8 * 2, num_filters * 8,
                                   is_deconv)
        self.dec3 = DecoderBlockV2(bottom_channel_nr // 4 + num_filters * 8,
                                   num_filters * 4 * 2, num_filters * 2,
                                   is_deconv)
        self.dec2 = DecoderBlockV2(bottom_channel_nr // 8 + num_filters * 2,
                                   num_filters * 2 * 2, num_filters * 2 * 2,
                                   is_deconv)
        self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2,
                                   num_filters, is_deconv)
        self.dec0 = ConvRelu(num_filters, num_filters)
        #self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
        self.final = nn.Conv2d(2816 - 2048, num_classes, kernel_size=1)
        self.drop = nn.Dropout(p=0.5)

        #SE blocks
        self.SE1 = SCSE(64, 16)
        self.SE2 = SCSE(256, 16)
        self.SE3 = SCSE(512, 16)
        self.SE4 = SCSE(1024, 16)
        self.SE5 = SCSE(2048, 16)
        self.SE6 = SCSE(256, 16)
        self.SE7 = SCSE(256, 16)
        self.SE8 = SCSE(64, 16)
        self.SE9 = SCSE(128, 16)
        self.SE10 = SCSE(32, 16)
        self.SE11 = SCSE(32, 16)

        if num_classes == 1:
            self.out_act = nn.Sigmoid()
        else:
            self.out_act = nn.Softmax(dim=1)
Example No. 30
# -*- coding: utf-8 -*-

from matplotlib import pyplot as plt
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.set_printoptions(edgeitems=2)
torch.manual_seed(123)

from torchvision import models

alexnet = models.AlexNet()

resnet = models.resnet101(pretrained=True)

print(resnet)
Example No. 31
def resnet101_COVNet_double_scoremax(num_classes=2, pretrained=True):
    model1 = models.resnet101(pretrained)
    model2 = models.resnet101(pretrained)

    return COVNet_double_scoremax(model1, model2, num_classes)
Example No. 32
def initialize_model(model_name, num_classes, freeze_layers, use_pretrained=True):
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None

    if model_name == "resnet50":
        """ Resnet50
        """
        model_ft = models.resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        
    elif model_name == "resnet101":
        """ Resnet101
        """
        model_ft = models.resnet101(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        
    elif model_name == "resnet152":
        """ Resnet152
        """
        model_ft = models.resnet152(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes

    elif model_name == "densenet121":
        """ Densenet121
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

    elif model_name == "densenet169":
        """ Densenet169
        """
        model_ft = init_densenet169()
        set_parameter_requires_grad(model_ft, freeze_layers)

        
    elif model_name == "densenet201":
        """ Densenet201
        """
        model_ft = models.densenet201(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, freeze_layers)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)

    elif model_name == "delf":
        """ DELF using our pretrained Densenet169 features """
        model_ft = init_delf(num_classes)
        set_parameter_requires_grad(model_ft, 2)
        
    elif model_name == "delf_TL":
        """ DELF using our pretrained Densenet169 features, without FC layer """
        model_ft = init_delf_TL()
        set_parameter_requires_grad(model_ft, 2)
        
    elif model_name == "our_densenet_TL":
        """ Our pretrained Densenet169 without FC layer """
        model_ft = init_densenet_TL()
        set_parameter_requires_grad(model_ft, freeze_layers)
        
    elif model_name == "resnet101gem":
        model_ft = init_resnet101gem()
        set_parameter_requires_grad(model_ft, 0)
    
    elif model_name == "delf_pca":
        model_ft = init_delf_pca()
        set_parameter_requires_grad(model_ft, 1)
        
    else:
        print("Invalid model name, exiting...")
        exit()
    
#     model_ft = nn.Sequential(*list(model_ft.children()))
    
    return model_ft
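
A minimal call sketch (assumes set_parameter_requires_grad and the project-specific init_* helpers are defined alongside this function; the freeze_layers value is illustrative and its meaning depends on that helper):

model_ft = initialize_model("resnet101", num_classes=10, freeze_layers=0, use_pretrained=True)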
Example No. 33
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.metrics import cohen_kappa_score
from tqdm import tqdm_notebook as tqdm
from pytorch_lightning.core.lightning import LightningModule
import random
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision.models import resnet101

import os
import sys
import pandas as pd
sys.path.append("../")
from utils.data_utils import get_tiles_new
import json

resnet101(pretrained=True)  # constructing with pretrained=True downloads/caches the ImageNet weights

SAVE_NAME = "effnetb0_256_36_new"
FP16 = True
batch_size = 2
num_workers = min(batch_size, 8)
ACCUM_STEPS = 1

data_dir = '../input/prostate-cancer-grade-assessment'
df_train = pd.read_csv(os.path.join(data_dir, 'train.csv'))
image_folder = os.path.join(data_dir, 'train_images')

with open("../notebooks/new_boxes_256_36.json", "r") as file:
    boxes_info = json.load(file)

kernel_type = SAVE_NAME
Example No. 34
    def __init__(self, num_classes, is_test=False, config=None, num_lstm=5):
        """Compose a SSD model using the given components.
		"""
        super(ResNetLSTM3, self).__init__()

        # alpha = 1
        # alpha_base = alpha
        # alpha_ssd = 0.5 * alpha
        # alpha_lstm = 0.25 * alpha

        resnet = resnet101(pretrained=True)
        all_modules = list(resnet.children())
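        # all_modules[:-4] keeps conv1 .. layer2 as the shared base network;
        # layer3 (all_modules[6]) is split off below as conv_final.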
        modules = all_modules[:-4]
        self.base_net = nn.Sequential(*modules)

        modules = all_modules[6:7]
        self.conv_final = nn.Sequential(*modules)

        self.num_classes = num_classes
        self.is_test = is_test
        self.config = config

        # lstm_layers = [BottleNeckLSTM(1024, 256),
        # 			   BottleNeckLSTM(256, 64),
        # 			   BottleNeckLSTM(64, 16),
        # 			   ConvLSTMCell(16, 16),
        # 			   ConvLSTMCell(16, 16)]

        lstm_layers = [
            BottleNeckLSTM(1024, 1024),
            BottleNeckLSTM(512, 512),
            BottleNeckLSTM(256, 256),
            ConvLSTMCell(256, 256),
            ConvLSTMCell(256, 256)
        ]

        self.lstm_layers = nn.ModuleList(
            [lstm_layers[i] for i in range(num_lstm)])

        self.extras = ModuleList([
            Sequential(
                Conv2d(in_channels=1024, out_channels=256, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=256,
                       out_channels=512,
                       kernel_size=3,
                       stride=2,
                       padding=1), ReLU()),
            Sequential(
                Conv2d(in_channels=512, out_channels=128, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=128,
                       out_channels=256,
                       kernel_size=3,
                       stride=2,
                       padding=1), ReLU()),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(), Conv2d(in_channels=128,
                               out_channels=256,
                               kernel_size=3), ReLU()),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(), Conv2d(in_channels=128,
                               out_channels=256,
                               kernel_size=3), ReLU())
        ])

        self.regression_headers = ModuleList([
            Conv2d(in_channels=512,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=1024,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=512,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=6 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * 4,
                   kernel_size=3,
                   padding=1),  # TODO: change to kernel_size=1, padding=0?
        ])

        self.classification_headers = ModuleList([
            Conv2d(in_channels=512,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=1024,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=512,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=6 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),
            Conv2d(in_channels=256,
                   out_channels=4 * num_classes,
                   kernel_size=3,
                   padding=1),  # TODO: change to kernel_size=1, padding=0?
        ])

        self.device = torch.device(
            f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
        if is_test:
            self.config = config
            self.priors = config.priors.to(self.device)
Example No. 35
def resnet101_COVNet(num_classes=2, pretrained=True):
    model = models.resnet101(pretrained)

    return COVNet(model, num_classes)
Example No. 36
def train():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    #print(DATASET_ROOT)
    train_set = IMAGE_Dataset(Path(DATASET_ROOT), data_transform)
    data_loader = DataLoader(dataset=train_set,
                             batch_size=32,
                             shuffle=True,
                             num_workers=1)
    #print(train_set.num_classes)

    resnet101 = models.resnet101(pretrained=False)  # resnet101 backbone, trained from scratch
    fc_features = resnet101.fc.in_features
    resnet101.fc = nn.Linear(fc_features, 196)
    model = resnet101.cuda(CUDA_DEVICES)
    model.train()

    best_model_params = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    num_epochs = 10
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=0.01,
                                momentum=0.9)

    file = open("acc_output.txt", mode='w')
    file = open("loss_output.txt", mode='w')

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_corrects = 0

        for i, (inputs, labels) in enumerate(data_loader):
            inputs = Variable(inputs.cuda(CUDA_DEVICES))
            labels = Variable(labels.cuda(CUDA_DEVICES))

            optimizer.zero_grad()

            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            training_loss += loss.item() * inputs.size(0)
            #revise loss.data[0]-->loss.item()
            training_corrects += torch.sum(preds == labels.data)
            #print(f'training_corrects: {training_corrects}')

        training_loss = training_loss / len(train_set)
        training_acc = training_corrects.double() / len(train_set)
        # print(training_acc.type())
        # print(f'training_corrects: {training_corrects}\tlen(train_set):{len(train_set)}\n')
        print(
            f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n'
        )

        if training_acc > best_acc:
            best_acc = training_acc
            best_model_params = copy.deepcopy(model.state_dict())

        file = open("acc_output.txt", mode='a')
        file.write('%s ,' % training_acc)

        file = open("loss_output.txt", mode='a')
        file.write('%s ,' % training_loss)

        if ((epoch + 1) % 1 == 0):
            model.load_state_dict(best_model_params)
            torch.save(model, f'model-{training_acc:.04f}-{epoch + 1:2d}.pth')

Example No. 37
def model_selector(model_name, num_classes=9, pretrained=False, scale_factor=6):
    # if pretrained:
    #     print("Pretrained-> Remember at end: {}".format(
    #         "transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])"))

    if model_name == "resnet18":
        if not pretrained:
            return ResNet18(num_classes=num_classes).cuda()
        else:
            resnet18 = models.resnet18(pretrained=True)
            resnet18.fc = nn.Linear(resnet18.fc.in_features, num_classes)
            for param in resnet18.parameters():  # Defrost model
                param.requires_grad = True
            return resnet18.cuda()
    elif model_name == "resnet34":
        if not pretrained:
            return ResNet34(num_classes=num_classes).cuda()
        else:
            resnet34 = models.resnet34(pretrained=True)
            resnet34.fc = nn.Linear(resnet34.fc.in_features, num_classes)
            for param in resnet34.parameters():  # Defrost model
                param.requires_grad = True
            return resnet34.cuda()
    elif model_name == "resnet50":
        if not pretrained:
            return ResNet50(num_classes=num_classes).cuda()
        else:
            resnet50 = models.resnet50(pretrained=True)
            resnet50.fc = nn.Linear(resnet50.fc.in_features, num_classes)
            for param in resnet50.parameters():  # Defrost model
                param.requires_grad = True
            return resnet50.cuda()
    elif model_name == "resnet101":
        if not pretrained:
            return ResNet101(num_classes=num_classes).cuda()
        else:
            resnet101 = models.resnet101(pretrained=True)
            resnet101.fc = nn.Linear(resnet101.fc.in_features, num_classes)
            for param in resnet101.parameters():  # Defrost model
                param.requires_grad = True
            return resnet101.cuda()
    elif model_name == "resnet152":
        if not pretrained:
            return ResNet152(num_classes=num_classes).cuda()
        else:
            resnet152 = models.resnet152(pretrained=True)
            resnet152.fc = nn.Linear(resnet152.fc.in_features, num_classes)
            for param in resnet152.parameters():  # Defrost model
                param.requires_grad = True
            return resnet152.cuda()
    elif model_name == "seresnext50":
        return PretrainedSeresNext50(num_classes, pretrained=pretrained).cuda()
    elif model_name == "mobilenetwd4":
        return PretrainedMobilenetWD4(num_classes, pretrained=pretrained).cuda()
    elif model_name == "resnext50_32":
        return PretrainedResNext50_32(num_classes, pretrained=pretrained).cuda()
    elif model_name == "resnext101_32":
        return PretrainedResNext101_32(num_classes, pretrained=pretrained).cuda()
    elif model_name == "seresnext101":
        return PretrainedSeresNext101(num_classes, pretrained=pretrained).cuda()
    elif model_name == "bam_resnet50":
        return bam_resnet50(num_classes, pretrained=pretrained).cuda()
    elif "unet" in model_name and "small" in model_name:
        return small_segmentation_model_selector(model_name, num_classes, scale_factor).cuda()
    if "unet_resnet34_pretrained" == model_name:
        return resnet_model_selector(model_name, num_classes)
    else:
        assert False, "Uknown model selected!"
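
A minimal call sketch (assumes the custom ResNet*/Pretrained* wrappers are importable and a CUDA device is available, since every branch moves the model with .cuda()):

model = model_selector("resnet101", num_classes=9, pretrained=True)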