Example #1
import torch

import resnet  # 3D-ResNet module providing generate_model


def load_premodel(pretrain_path):
    print('loading pretrained model {}'.format(pretrain_path))
    model = resnet.generate_model(model_depth=152,
                                  n_classes=700,
                                  n_input_channels=3,
                                  shortcut_type='B',
                                  conv1_t_size=7,
                                  conv1_t_stride=1,
                                  no_max_pool=False,
                                  widen_factor=1.0)

    pretrain = torch.load(pretrain_path, map_location='cpu')
    model.load_state_dict(pretrain['state_dict'])
    print('model loaded')
    return model
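
A minimal usage sketch; the checkpoint path below is a placeholder for any R3D-152 checkpoint saved with a 'state_dict' key as above:

model = load_premodel('pretrained/r3d152_K_200ep.pth')  # hypothetical path
model.eval()  # switch to inference mode before evaluation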
Example #2
    def __init__(self, config):

        super(MedicNet, self).__init__()
        self.bias = config.fc_bias
        self.mlp_indim = config.mlp_indim
        self.lstm_indim = config.lstm_indim
        self.hidden_dim = config.hidden_dim
        self.num_classes = config.num_classes
        self.seq_len = config.seq_len
        self.batch_size = config.batch_size
        self.densenet_drop_rate = config.densenet_drop_rate
        # Keep the config flags under distinct names: encoder_3d, clinical_att
        # and lstm_att are reassigned to nn.Module instances further down.
        self.encoder_3d_type = config.encoder_3d
        self.sdata_pool = config.sdata_pool
        self.use_clinical_att = config.clinical_att
        # Fixed initial attention weights over the 61 clinical features
        # (note: .cuda() here hard-codes GPU placement).
        self.att_weights = torch.ones((1, 61)).cuda() * 0.5
        self.clinical_backbone = config.clinical_backbone
        self.lstm_all_output = config.lstm_all_output
        self.use_lstm_att = config.lstm_att
        self.clinical_augmentation = config.clinical_augmentation

        # _make_layer is a project-local helper (definition not shown in this excerpt)
        self.layer1 = self._make_layer(2, 32, self.mlp_indim, 31, 1, stride=1)
        self.layer2 = self._make_layer(32, 64, 31, 16, 1, stride=1)
        self.layer3 = self._make_layer(64, 128, 16, 8, 1, stride=1)

        self.sdata_encoder = nn.Sequential(
            nn.Linear(self.mlp_indim, 64, bias=self.bias), nn.LayerNorm(64),
            nn.RReLU(inplace=True), nn.Linear(64, 96, bias=self.bias),
            nn.LayerNorm(96), nn.RReLU(inplace=True),
            nn.Linear(96, 128, bias=self.bias))

        self.clinical_res_bn_relu1 = nn.Sequential(
            nn.BatchNorm1d(self.mlp_indim),
            nn.ReLU(inplace=True),
        )
        self.clinical_res_bn_relu2 = nn.Sequential(
            nn.BatchNorm1d(self.mlp_indim),
            nn.ReLU(inplace=True),
        )
        self.clinical_res_bn_relu3 = nn.Sequential(
            nn.BatchNorm1d(self.mlp_indim),
            nn.ReLU(inplace=True),
        )

        self.clinical_res_bn1 = nn.BatchNorm1d(self.mlp_indim)

        self.bn1 = nn.BatchNorm1d(self.mlp_indim)
        self.bn2 = nn.BatchNorm1d(self.mlp_indim)
        self.bn3 = nn.BatchNorm1d(self.mlp_indim)

        self.clinical_encoder_stage1 = nn.Sequential(
            nn.Linear(self.mlp_indim, self.mlp_indim, bias=self.bias),
            nn.ReLU(inplace=True))

        self.clinical_encoder_stage2 = nn.Sequential(
            nn.Linear(self.mlp_indim, self.mlp_indim, bias=self.bias),
            nn.ReLU(inplace=True))

        self.clinical_encoder_stage3 = nn.Sequential(
            nn.Linear(self.mlp_indim, self.mlp_indim, bias=self.bias),
            nn.ReLU(inplace=True))

        self.classifier_final = nn.Sequential(
            nn.Linear(config.hidden_dim * config.seq_len, 512, bias=self.bias),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Linear(256, config.num_classes, bias=self.bias),
        )

        self.sdata_avg_pool = nn.AdaptiveAvgPool1d(1)
        self.sdata_max_pool = nn.AdaptiveMaxPool1d(1)

        self.clinical_att = nn.Sequential(nn.Linear(self.mlp_indim, 128),
                                          nn.BatchNorm1d(128),
                                          nn.ReLU(inplace=True),
                                          nn.Linear(128, self.mlp_indim))

        self.lstm_att = nn.Sequential(nn.Linear(self.lstm_indim, 128),
                                      nn.BatchNorm1d(128),
                                      nn.ReLU(inplace=True),
                                      nn.Linear(128, self.lstm_indim))

        # self.lstm holds either an LSTM or a GRU, depending on the config
        if config.seq_processor == 'lstm':
            self.lstm = nn.LSTM(self.lstm_indim, self.hidden_dim, num_layers=1)
        elif config.seq_processor == 'gru':
            self.lstm = nn.GRU(self.lstm_indim, self.hidden_dim, num_layers=1)
        else:
            raise NotImplementedError

        if config.encoder_3d == 'baseline':
            self.encoder_3d = Baseline(self.num_classes)
        elif config.encoder_3d == 'densenet':
            self.encoder_3d = DenseNet(bn_size=self.batch_size,
                                       drop_rate=self.densenet_drop_rate,
                                       config=config,
                                       efficient=True)
        elif config.encoder_3d == 'resnet':
            self.encoder_3d = generate_model(config)
        else:
            raise NotImplementedError

        self.sdata_dropout = nn.Dropout(p=0.02)
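
A sketch of the config object MedicNet expects, inferred from the attributes read above; SimpleNamespace stands in for whatever config class the project actually uses, and every value is an illustrative assumption (mlp_indim=61 matches the 61-wide attention weights):

from types import SimpleNamespace

config = SimpleNamespace(
    fc_bias=True, mlp_indim=61, lstm_indim=128, hidden_dim=256,
    num_classes=2, seq_len=8, batch_size=4, densenet_drop_rate=0.2,
    encoder_3d='resnet', sdata_pool='avg', clinical_att=True,
    clinical_backbone='mlp', lstm_all_output=False, lstm_att=False,
    clinical_augmentation=False, seq_processor='lstm')
model = MedicNet(config)  # note: __init__ calls .cuda(), so a GPU is required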
Example #3
import torch

import ResNet  # module providing generate_model for 3D ResNets


def load_pretrained_model_utils(model_path):
    ckpt = torch.load(model_path)
    resnet = ResNet.generate_model(model_depth=18, n_classes=700)  # Kinetics-700 head
    resnet.load_state_dict(ckpt['state_dict'])
    return resnet
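
A common follow-up is to swap the 700-class Kinetics head for a task-specific one; the 512-dimensional feature width is an assumption about this particular ResNet-18 implementation, and the path is a placeholder:

import torch.nn as nn

backbone = load_pretrained_model_utils('models/r3d18_K_200ep.pth')
backbone.fc = nn.Linear(512, 2)  # assumed 512-dim features for ResNet-18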
Example #4
import math

import joblib
import torch
from PIL import Image

import resnet
from spatial_transforms import (Compose, Normalize, Resize, CenterCrop,
                                CornerCrop, MultiScaleCornerCrop,
                                RandomResizedCrop, RandomHorizontalFlip,
                                ToTensor, ScaleValue, ColorJitter,
                                PickFirstChannels)

# load the trained model and label binarizer from disk
print('Loading model and label binarizer...')
lb = joblib.load("./outputs/lb.pkl")

model = resnet.generate_model(model_depth=50, n_classes=700)

fc = torch.nn.Linear(2048, 2)  # requires_grad=True by default
model.fc = fc
print('Model Loaded...')

model.load_state_dict(torch.load("./outputs/fight_reco_3DCNNmodel.pth"))
print('Loaded model state_dict...')

device = torch.device('cuda:0')
model.to(device)

value_scale = 1
mean = [0.4345, 0.4051, 0.3775]
std = [0.2768, 0.2713, 0.2737]
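
These statistics feed a normalization transform; a sketch of an inference-time pipeline built from the imported spatial_transforms (the 112-pixel crop size is an assumption):

spatial_transform = Compose([
    Resize(112),            # resize the shorter side
    CenterCrop(112),
    ToTensor(),
    ScaleValue(value_scale),
    Normalize(mean, std),
])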
Example #5
import av
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

import resnet
from spatial_transforms import (Compose, Normalize, Resize, CenterCrop,
                                CornerCrop, MultiScaleCornerCrop,
                                RandomResizedCrop, RandomHorizontalFlip,
                                ToTensor, ScaleValue, ColorJitter,
                                PickFirstChannels)
from collections import namedtuple

model = resnet.generate_model(model_depth=50,
                              n_classes=2,
                              n_input_channels=3,
                              shortcut_type="B",
                              conv1_t_size=7,
                              conv1_t_stride=1,
                              no_max_pool=False,
                              widen_factor=1.0)

state_dict = torch.load("r3d50_KMS_200ep-photosensitivity-200.pth",
                        map_location='cpu')['state_dict']
model.load_state_dict(state_dict)


def get_normalize_method(mean, std, no_mean_norm, no_std_norm):
    if no_mean_norm:
        if no_std_norm:
            return Normalize([0, 0, 0], [1, 1, 1])
        else:
            return Normalize([0, 0, 0], std)
    elif no_std_norm:
        return Normalize(mean, [1, 1, 1])
    else:
        return Normalize(mean, std)
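
A hedged inference sketch with PyAV: decode frames, apply a spatial transform, stack them into a (1, C, T, H, W) clip, and run the model; the clip length of 16, the crop size, and the video path are assumptions:

container = av.open('clip.mp4')  # placeholder video path
frames = [f.to_image() for f in container.decode(video=0)][:16]

transform = Compose([Resize(112), CenterCrop(112), ToTensor()])
clip = torch.stack([transform(img) for img in frames], dim=1)  # (C, T, H, W)

model.eval()
with torch.no_grad():
    probs = F.softmax(model(clip.unsqueeze(0)), dim=1)  # (1, n_classes)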
Example #6
import torch
from torch import nn, optim
from torch.optim import lr_scheduler

# generate_model, train_model and device are project-local; device is assumed
# to be defined at module level, e.g. device = torch.device('cuda:0').


def train(img_root_dir: str,
          dataloaders,
          image_datasets,
          num_class: int,
          num_epochs: int,
          model_depth: int,
          optimizer: str,
          lr: float,
          batch_size: int,
          weight_decay: float = 0):
    """
    Args:
        img_root_dir (str): the absolute path of the top directory of the whole dataset
        dataloaders (Dict[str: torch.utils.data.DataLoader]): dataloaders
        image_datasets (Dict[str: torch.utils.data.Dataset]): datasets
        num_class (int): the number of classes.
        num_epochs (int): the number of epochs to train the model.
        model (str): choose which kind of models to use.
        optimizer (str): choose which kind of optimizer to use.
        lr (float): learning rate
        batch_size (int): batch size
        weight_decay (float, optional): weight_decay(L2 penalty). Default is 0.
    Return:
        prefix (str): the prefix of the parameter file of the model.
    """
    assert model_depth in [
        10, 18, 34, 50, 101, 152, 200
    ], "model_depth should be in [10, 18, 34, 50, 101, 152, 200], but got {}".format(
        model_depth)
    model = generate_model(model_depth)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    # Observe that all parameters are being optimized
    assert optimizer in [
        'sgd', 'adam'
    ], 'Not supported optimizer type: {}, only support sgd and adam'.format(
        optimizer)
    if optimizer == 'sgd':
        optimizer_ft = optim.SGD(model.parameters(),
                                 lr=lr,
                                 momentum=0.9,
                                 weight_decay=weight_decay)
    else:
        optimizer_ft = optim.Adam(model.parameters(),
                                  lr=lr,
                                  weight_decay=weight_decay)
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=7,
                                           gamma=0.1)

    prefix = (f"resnet_3d_{model_depth}_bs{batch_size}_optim_{optimizer}"
              f"_lr{lr}_wd{weight_decay}_epochs{num_epochs}"
              f"_data_{img_root_dir.split('/')[-1]}")
    model = train_model(dataloaders,
                        image_datasets,
                        model,
                        criterion,
                        optimizer_ft,
                        exp_lr_scheduler,
                        num_epochs=num_epochs,
                        prefix=prefix,
                        num_class=num_class)
    torch.save(model, "./models/" + prefix + "_best_acc.pkl")
    return prefix
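
A usage sketch; building the dataloaders and datasets is elided, and the argument values are illustrative:

# dataloaders = {'train': ..., 'val': ...} built with torch.utils.data elsewhere
prefix = train(img_root_dir='/data/fights',  # hypothetical dataset root
               dataloaders=dataloaders,
               image_datasets=image_datasets,
               num_class=2,
               num_epochs=25,
               model_depth=50,
               optimizer='sgd',
               lr=1e-3,
               batch_size=8)
print('best weights saved to ./models/' + prefix + '_best_acc.pkl')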