Example #1
def main(
        save_path=cfg.save,  # defaults come from the cfg configuration module
        n_epochs=cfg.n_epochs,
        seed=cfg.seed):
    # set seed
    if seed is not None:
        set_seed(seed)
    cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for fixed input sizes
    # back up your code
    os.makedirs(save_path)
    copy_file_backup(save_path)
    redirect_stdout(save_path)

    # Datasets
    valid_set = None
    test_set = CACTwoClassDataset(crop_size=[48, 48, 48],
                                  data_path=env.data,
                                  datatype=2,
                                  fill_with=-1)

    # Define model
    model_dict = {
        'resnet18': ClsResNet,
        'vgg16': ClsVGG,
        'densenet121': ClsDenseNet
    }
    model = model_dict[cfg.backbone](pretrained=cfg.pretrained,
                                     num_classes=2,
                                     backbone=cfg.backbone)

    # convert the model to its 3D counterpart and load pretrained weights for the chosen convolution type
    if cfg.conv == 'ACSConv':
        model = model_to_syncbn(ACSConverter(model))
    if cfg.conv == 'Conv2_5d':
        model = model_to_syncbn(Conv2_5dConverter(model))
    if cfg.conv == 'Conv3d':
        if cfg.pretrained_3d == 'i3d':
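            # i3d-style inflation: initialize 3D kernels by repeating the pretrained 2D kernels along the depth axis (axis -3)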
            model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=-3))
        else:
            model = model_to_syncbn(
                Conv3dConverter(model, i3d_repeat_axis=None))
            if cfg.pretrained_3d == 'video':
                model = load_video_pretrained_weights(
                    model, env.video_resnet18_pretrain_path)
            elif cfg.pretrained_3d == 'mednet':
                model = load_mednet_pretrained_weights(
                    model, env.mednet_resnet18_pretrain_path)
    # print(model)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
    model_path = '/cluster/home/it_stu167/wwj/classification_after_crop/result/CACClass/resnet18/ACSConv/48_1-2_m0/epoch_107/model.dat'
    model.load_state_dict(torch.load(model_path))
    # train and test the model
    train(model=model,
          valid_set=valid_set,
          test_set=test_set,
          save=save_path,
          n_epochs=n_epochs)

    print('Done!')
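The examples in this gallery call a set_seed helper that is not shown. A minimal sketch of what such a helper typically does, assuming it only needs to seed the standard RNGs (hypothetical implementation, not the repository's own):

import random
import numpy as np
import torch

def set_seed(seed):
    # seed Python, NumPy and PyTorch (CPU and all GPUs) for reproducibility
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)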
Example #2
def main(save_path=cfg.save, n_epochs=cfg.n_epochs, seed=cfg.seed):
    # set seed
    if seed is not None:
        set_seed(seed)
    cudnn.benchmark = True
    # back up your code
    os.makedirs(save_path)
    copy_file_backup(save_path)
    redirect_stdout(save_path)

    # Datasets
    train_set = LIDCSegDataset(crop_size=48,
                               move=5,
                               data_path=env.data,
                               train=True)
    valid_set = None
    test_set = LIDCSegDataset(crop_size=48,
                              move=5,
                              data_path=env.data,
                              train=False)

    # Define model
    model_dict = {
        'resnet18': FCNResNet,
        'vgg16': FCNVGG,
        'densenet121': FCNDenseNet
    }
    model = model_dict[cfg.backbone](pretrained=cfg.pretrained,
                                     num_classes=2,
                                     backbone=cfg.backbone)

    # convert the model to its 3D counterpart and load pretrained weights for the chosen convolution type
    if cfg.conv == 'ACSConv':
        model = model_to_syncbn(ACSConverter(model))
    if cfg.conv == 'Conv2_5d':
        model = model_to_syncbn(Conv2_5dConverter(model))
    if cfg.conv == 'Conv3d':
        if cfg.pretrained_3d == 'i3d':
            model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=-3))
        else:
            model = model_to_syncbn(
                Conv3dConverter(model, i3d_repeat_axis=None))
            if cfg.pretrained_3d == 'video':
                model = load_video_pretrained_weights(
                    model, env.video_resnet18_pretrain_path)
            elif cfg.pretrained_3d == 'mednet':
                model = load_mednet_pretrained_weights(
                    model, env.mednet_resnet18_pretrain_path)
    print(model)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
    # train and test the model
    train(model=model,
          train_set=train_set,
          valid_set=valid_set,
          test_set=test_set,
          save=save_path,
          n_epochs=n_epochs)

    print('Done!')
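model_to_syncbn is an example utility used throughout these snippets. A rough sketch of the pattern, assuming its job is to swap BatchNorm layers for synchronized BatchNorm so batch statistics are shared across GPUs (the actual helper may use a custom SyncBN implementation):

import torch.nn as nn

def model_to_syncbn(model):
    # replace every BatchNorm layer with SyncBatchNorm (assumed behaviour)
    return nn.SyncBatchNorm.convert_sync_batchnorm(model)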
Example #3
def main(save_path=cfg.save, n_epochs=cfg.n_epochs, seed=cfg.seed):
    if seed is not None:
        set_seed(seed)
    cudnn.benchmark = True

    os.makedirs(save_path)
    copy_file_backup(save_path)
    redirect_stdout(save_path)
    # Datasets
    train_data = env.data_train
    test_data = env.data_test
    shape_cp = env.shape_checkpoint

    train_set = BaseDatasetVoxel(train_data, cfg.train_samples)
    valid_set = None
    test_set = BaseDatasetVoxel(test_data, 200)

    # Models

    model = UNet(6)
    if cfg.conv == 'Conv3D':
        model = Conv3dConverter(model)
        initialize(model.modules())
    elif cfg.conv == 'Conv2_5D':
        if cfg.pretrained:
            shape_cp = torch.load(shape_cp)
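            # drop the last two state-dict entries (assumed to be the final
            # layer's weight and bias) so the backbone weights load below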
            shape_cp.popitem()
            shape_cp.popitem()
            incompatible_keys = model.load_state_dict(shape_cp, strict=False)
            print('load shape pretrained weights\n', incompatible_keys)
        model = Conv2_5dConverter(model)
    elif cfg.conv == 'ACSConv':
        # You can use either the naive ``ACSUNet`` or the ``ACSConverter(model)``
        model = ACSConverter(model)
        # model = ACSUNet(6)
        if cfg.pretrained:
            shape_cp = torch.load(shape_cp)
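            # same trimming as above: drop the final-layer entries before loading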
            shape_cp.popitem()
            shape_cp.popitem()
            incompatible_keys = model.load_state_dict(shape_cp, strict=False)
            print('load shape pretrained weights\n', incompatible_keys)
    else:
        raise ValueError(f'invalid conv type: {cfg.conv}')

    print(model)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
    # Train the model
    train(model=model,
          train_set=train_set,
          valid_set=valid_set,
          test_set=test_set,
          save=save_path,
          n_epochs=n_epochs)

    print('Done!')
Example #4
def main():
    torch.cuda.empty_cache()  # free any cached GPU memory before building the model


    # Initialize the model for this run
    model, input_size = initialize_model(MODEL,
                                         num_classes,
                                         feature_extract,
                                         use_pretrained=True)

    # convert to ACS convolutions
    model_3d = ACSConverter(model)
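    # model_3d now applies the pretrained 2D kernels along the axial, coronal
    # and sagittal views of a 3D volume (the core idea of ACS convolutions)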

    #unet_3d = ACSUnet(num_classes=2)

    # data loaders for PyTorch

    X_train, X_test, y_train, y_test = train_test_split(CT_paths,
                                                        labels,
                                                        test_size=0.30,
                                                        random_state=42)

    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      test_size=0.15,
                                                      random_state=24)

    train_set = PytorchDataGenerator(X_train, y_train, dim=DIMS)
    train_gen = torch.utils.data.DataLoader(train_set,
                                            batch_size=BS,
                                            shuffle=True)

    val_set = PytorchDataGenerator(X_val, y_val, dim=DIMS)
    val_gen = torch.utils.data.DataLoader(val_set, batch_size=BS, shuffle=True)

    test_set = PytorchDataGenerator(X_test, y_test, dim=DIMS)
    test_gen = torch.utils.data.DataLoader(test_set,
                                           batch_size=BS,
                                           shuffle=True)

    dataloaders_dict = {"train": train_gen, "val": val_gen}
    # Gather the parameters to be optimized/updated in this run. If we are
    # fine-tuning, we update all parameters; if we are doing feature
    # extraction, we only update the parameters we just initialized, i.e.
    # the parameters whose requires_grad is True.

    # Freeze everything except the classifier head

    params_to_update = model.parameters()
    print("Params to learn:")
    if feature_extract:
        params_to_update = []
        for name, param in model.named_parameters():
            if "fc" in name:
                param.requires_grad = True
            else:
                param.requires_grad = False

            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)
    else:
        for name, param in model.named_parameters():
            if param.requires_grad:
                print("\t", name)


    # summary(model_3d, (n_channels, DIMS[0], DIMS[1], DIMS[2]))

    # Observe that only the gathered parameters are optimized
    optimizer = torch.optim.Adam([
        dict(params=params_to_update, lr=INIT_LR),
    ])

    # Setup the loss fxn
    criterion = nn.CrossEntropyLoss()

    # Train and evaluate
    #model, hist = train_model(model, dataloaders_dict, criterion, optimizer, num_epochs=EPOCHS, is_inception=(MODEL=="inception"))

    new_model = FeatureExtractor(model_3d)
    new_model.to(device)
    t_features, v_features, y_train, y_val = extract(
        new_model, dataloaders=dataloaders_dict)

    print("check")
    print(
        f"[INFO] {len(t_features)} shape: {t_features[0].shape} {len(y_train)} {y_train[0].shape}"
    )

    t_features_df = pd.DataFrame(t_features)
    v_features_df = pd.DataFrame(v_features)

    t_features_df["target"] = y_train
    v_features_df["target"] = y_val

    t_features_df.to_csv("train_features_acs.csv", index=False)
    v_features_df.to_csv("val_features_acs.csv", index=False)
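FeatureExtractor and extract are project-specific helpers that are not shown here. A minimal sketch of the wrapper, assuming it exposes the backbone's pre-classifier activations as flat feature vectors (hypothetical implementation mirroring the call sites above):

import torch
import torch.nn as nn

class FeatureExtractor(nn.Module):
    def __init__(self, backbone):
        super().__init__()
        # drop the final classification layer and keep everything before it
        self.features = nn.Sequential(*list(backbone.children())[:-1])

    def forward(self, x):
        # flatten per-sample activations into feature vectors
        return torch.flatten(self.features(x), start_dim=1)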
Example #5
def main(save_path=cfg.save, n_epochs=cfg.n_epochs, seed=cfg.seed):
    # set seed
    if seed is not None:
        set_seed(seed)
    cudnn.benchmark = True
    # back up your code
    os.makedirs(save_path)
    copy_file_backup(save_path)
    redirect_stdout(save_path)

    # Datasets
    train_set = CACSegDataset(crop_size=[48, 48, 48],
                              data_path=env.data,
                              random=cfg.random,
                              datatype=0)
    valid_set = CACSegDataset(crop_size=[48, 48, 48],
                              data_path=env.data,
                              random=cfg.random,
                              datatype=1)
    test_set = CACSegDataset(crop_size=[48, 48, 48],
                             data_path=env.data,
                             random=cfg.random,
                             datatype=2)

    # Define model
    model_dict = {
        'resnet18': FCNResNet,
        'resnet34': FCNResNet,
        'resnet50': FCNResNet,
        'resnet101': FCNResNet,
        'vgg16': FCNVGG,
        'densenet121': FCNDenseNet,
        'unet': UNet
    }
    model = model_dict[cfg.backbone](pretrained=cfg.pretrained,
                                     num_classes=3,
                                     backbone=cfg.backbone,
                                     checkpoint=cfg.checkpoint)  # modified to accept an optional checkpoint
    # model.load_state_dict(torch.load('/cluster/home/it_stu167/wwj/classification_after_crop/result/CACSeg/resnet18/ACSConv/200911_104150_pretrained/model.dat'))

    # convert the model to its 3D counterpart and load pretrained weights for the chosen convolution type
    if cfg.conv == 'ACSConv':
        model = model_to_syncbn(ACSConverter(model))
    if cfg.conv == 'Conv2_5d':
        model = model_to_syncbn(Conv2_5dConverter(model))
    if cfg.conv == 'Conv3d':
        if cfg.pretrained_3d == 'i3d':
            model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=-3))
        else:
            model = model_to_syncbn(
                Conv3dConverter(model, i3d_repeat_axis=None))
            if cfg.pretrained_3d == 'video':
                model = load_video_pretrained_weights(
                    model, env.video_resnet18_pretrain_path)
            elif cfg.pretrained_3d == 'mednet':
                model = load_mednet_pretrained_weights(
                    model, env.mednet_resnet18_pretrain_path)
    # print(model)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
    # train and test the model
    train(model=model,
          train_set=train_set,
          valid_set=valid_set,
          test_set=test_set,
          save=save_path,
          n_epochs=n_epochs)

    print('Done!')
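copy_file_backup and redirect_stdout are small experiment-logging helpers shared by these examples. A rough sketch of the idea, assuming they snapshot the entry-point script and route prints into the run directory (hypothetical implementation):

import os
import shutil
import sys

def copy_file_backup(save_path):
    # copy the running script into the run directory for reproducibility (assumed behaviour)
    backup_dir = os.path.join(save_path, 'code_backup')
    os.makedirs(backup_dir, exist_ok=True)
    shutil.copy2(os.path.abspath(sys.argv[0]), backup_dir)

def redirect_stdout(save_path):
    # send all subsequent prints to a log file in the run directory (assumed behaviour)
    sys.stdout = open(os.path.join(save_path, 'log.txt'), 'w')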
Example #6
File: test.py Project: M3DV/ACSConv
import torch
'''
test the ACS operators
'''
from acsconv.operators import ACSConv
# ACSConv takes Conv2d-style arguments but convolves over 3D volumes
B, C_in, D, H, W = (1, 3, 64, 64, 64)
x = torch.rand(B, C_in, D, H, W)
conv = ACSConv(in_channels=3,
               out_channels=3,
               kernel_size=3,
               padding=1,
               groups=3)
out = conv(x)
'''
test the converters module
'''
from torchvision.models import resnet18
from acsconv.converters import ACSConverter
# model_2d is a standard pytorch 2D model
model_2d = resnet18(pretrained=True)
B, C_in, H, W = (1, 3, 64, 64)
input_2d = torch.rand(B, C_in, H, W)
output_2d = model_2d(input_2d)

model_3d = ACSConverter(model_2d)
# once converted, model_3d uses ACSConv and can process 3D volumes.
B, C_in, D, H, W = (1, 3, 64, 64, 64)
input_3d = torch.rand(B, C_in, D, H, W)
output_3d = model_3d(input_3d)
'''
test the native ACS models
'''
from acsconv.models.acsunet import ACSUNet
unet_3d = ACSUNet(num_classes=3)
B, C_in, D, H, W = (1, 1, 64, 64, 64)
input_3d = torch.rand(B, C_in, D, H, W)
output_3d = unet_3d(input_3d)

print('==========================================')
print('The installation of ACSConv is successful!')
Example #7
def main(
        save_path=cfg.save,  # defaults come from the cfg configuration module
        n_epochs=cfg.n_epochs,
        seed=cfg.seed):
    # set seed
    if seed is not None:
        set_seed(seed)
    cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms for fixed input sizes
    # back up your code
    os.makedirs(save_path)
    copy_file_backup(save_path)
    redirect_stdout(save_path)

    # Datasets
    train_set = CACTwoClassDataset(crop_size=[48, 48, 48],
                                   data_path=env.data,
                                   datatype=0,
                                   fill_with=-1)
    test_set = CACTwoClassDataset(crop_size=[48, 48, 48],
                                  data_path=env.data,
                                  datatype=1,
                                  fill_with=-1)

    # Define model
    model_dict = {
        'resnet18': ClsResNet,
        'resnet34': ClsResNet,
        'resnet50': ClsResNet,
        'resnet101': ClsResNet,
        'resnet152': ClsResNet,
        'vgg16': ClsVGG,
        'densenet121': ClsDenseNet
    }
    model = model_dict[cfg.backbone](pretrained=cfg.pretrained,
                                     num_classes=2,
                                     backbone=cfg.backbone,
                                     checkpoint=cfg.checkpoint,
                                     pooling=cfg.pooling)

    # convert the model to its 3D counterpart and load pretrained weights for the chosen convolution type
    if cfg.conv == 'ACSConv':
        model = model_to_syncbn(ACSConverter(model))
    if cfg.conv == 'Conv2_5d':
        model = model_to_syncbn(Conv2_5dConverter(model))
    if cfg.conv == 'Conv3d':
        if cfg.pretrained_3d == 'i3d':
            model = model_to_syncbn(Conv3dConverter(model, i3d_repeat_axis=-3))
        else:
            model = model_to_syncbn(
                Conv3dConverter(model, i3d_repeat_axis=None))
            if cfg.pretrained_3d == 'video':
                model = load_video_pretrained_weights(
                    model, env.video_resnet18_pretrain_path)
            elif cfg.pretrained_3d == 'mednet':
                model = load_mednet_pretrained_weights(
                    model, env.mednet_resnet18_pretrain_path)
    # print(model)
    torch.save(model.state_dict(), os.path.join(save_path, 'model.dat'))
    # torch.save(model.state_dict(), os.path.join(save_path, 'model.pth'))
    # train and test the model
    train(model=model,
          train_set=train_set,
          test_set=test_set,
          save=save_path,
          n_epochs=n_epochs)

    print('Done!')
Example #8
def main():
    torch.cuda.empty_cache()  # free any cached GPU memory before building the model

    INIT_LR = 1e-3
    BS = 1
    EPOCHS = 100
    DIMS = (256, 256, 192, 1)

    model = smp.Unet(
        'efficientnet-b0',  # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
        encoder_weights="imagenet",  # use ImageNet pre-trained weights to initialize the encoder
        encoder_depth=3,
        decoder_channels=[256, 128, 64],
        in_channels=1,  # model input channels (1 for grayscale images, 3 for RGB, etc.)
        classes=2,  # model output channels (number of classes in your dataset)
    )

    # convert to ACS convolutions
    model_3d = ACSConverter(model)
    summary(model_3d, (DIMS[-1], DIMS[0], DIMS[1], DIMS[2]))

    # paths to the CT volumes and their matching ROI masks
    CT_paths = glob.glob(
        r"D:\FISICA MEDICA\CT_LUNG\ieo_CT_lung_nrrd\CT\CT\*.nrrd")
    ROI_paths = glob.glob(
        r"D:\FISICA MEDICA\CT_LUNG\ieo_CT_lung_nrrd\ROI\ROI\*.nrrd")

    #unet_3d = ACSUnet(num_classes=2)

    # dataset wrapper for PyTorch
    class PytorchDataGenerator(torch.utils.data.Dataset):
        'Characterizes a dataset for PyTorch'

        def __init__(self,
                     list_CTs,
                     list_ROI,
                     dim=(512, 512, 192, 1),
                     upper=192,
                     shuffle=True,
                     normalize=False):
            self.dim = dim
            self.list_ROI = list_ROI
            self.list_CTs = list_CTs
            self.shuffle = shuffle
            self.upper = upper
            self.type = "train"
            self.normalize = normalize

        def __len__(self):
            # Denotes the total length of the dataset
            return len(self.list_CTs)

        def __getitem__(self, index):
            # Generate one sample of data
            # Generate indexes of the batch
            #indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]

            # Find list of IDs
            #list_IDs_temp = [self.list_CTs[k] for k in indexes]
            #list_ROIs_temp = [self.list_ROI[k] for k in indexes]
            list_IDs_temp = self.list_CTs[index]
            list_ROIs_temp = self.list_ROI[index]
            # Generate data
            X, y = self.data_generation(list_IDs_temp, list_ROIs_temp)

            # from the original author's code
            #X = X.transpose((2, 0, 1))  # make image C x H x W
            X = torch.from_numpy(X)
            y = torch.from_numpy(y)
            # normalise image only if using mobilenet
            return X, y

        def data_generation(self, list_IDs_temp, list_ROIs_temp):
            #X = np.empty((self.batch_size, *self.dim))
            #y = np.empty((self.batch_size, *self.dim))

            # y = np.empty((self.batch_size), dtype=int)

            # Generate data

            n_x, _ = nrrd.read(list_IDs_temp)  # a single file path, not a list

            X = self.preprocess(n_x)
            n_y, _ = nrrd.read(list_ROIs_temp)  # likewise a single path
            # Store class
            y = self.preprocess(n_y)
            # for i, ID in enumerate(list_IDs_temp):
            #     # Store sample
            #
            #     n_x, _ = nrrd.read(ID)
            #
            #     X[i,] = self.preprocess(n_x)
            #
            #     ROI_ID = list_ROIs_temp[i]
            #     n_y, _ = nrrd.read(ROI_ID)
            #     # Store class
            #     y[i,] = self.preprocess(n_y)
            if self.normalize:
                X = (X - np.min(X)) / (np.max(X) - np.min(X))
                y = (y - np.min(y)) / (np.max(y) - np.min(y))

            return X, y

        def preprocess(self, raw):

            # print(f"[DEBUG] {raw.shape}")
            # resizing
            if self.dim[:2] != raw.shape[:-1]:
                output = np.zeros((self.dim[0], self.dim[1], self.dim[2]))
                for i in range(raw.shape[-1]):
                    output[:, :,
                           i] = cv2.resize(raw[:, :, i].astype('float32'),
                                           (self.dim[0], self.dim[1]))
                raw = output
            # print(f"[DEBUG - RES] {raw.shape}")
            # check the third dimension
            if raw.shape[2] < self.upper:

                ##pad the image with zeros

                if (self.upper - raw.shape[2]) % 2 == 0:
                    w = int((self.upper - raw.shape[-1]) / 2)
                    u = np.zeros((self.dim[0], self.dim[1], w))
                    raw = np.concatenate((u, raw, u), axis=-1)
                else:
                    w = int((self.upper - raw.shape[-1] - 1) / 2)
                    u = np.zeros((self.dim[0], self.dim[1], w))
                    d = np.zeros((self.dim[0], self.dim[1], w + 1))
                    raw = np.concatenate((u, raw, d), axis=-1)

            elif raw.shape[2] > self.upper:
                # crop centrally along the third dimension to exactly `upper` slices
                dh = (raw.shape[2] - self.upper) // 2
                raw = raw[:, :, dh:dh + self.upper]
            # print(f"[DEBUG2] {raw.shape}")

            return np.expand_dims(raw, 0)

    X_train, X_test, y_train, y_test = train_test_split(CT_paths,
                                                        ROI_paths,
                                                        test_size=0.30,
                                                        random_state=42)

    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      test_size=0.15,
                                                      random_state=24)

    train_set = PytorchDataGenerator(X_train, y_train, dim=DIMS)
    train_gen = torch.utils.data.DataLoader(train_set,
                                            batch_size=BS,
                                            shuffle=True)

    val_set = PytorchDataGenerator(X_val, y_val, dim=DIMS)
    val_gen = torch.utils.data.DataLoader(val_set, batch_size=BS, shuffle=True)

    test_set = PytorchDataGenerator(X_test, y_test, dim=DIMS)
    test_gen = torch.utils.data.DataLoader(test_set,
                                           batch_size=BS,
                                           shuffle=True)

    # Select the computation device
    if torch.cuda.is_available():
        dev = "cuda:0"
        print(f"[INFO] using {torch.cuda.get_device_name(0)}")
        device = torch.device(dev)
    else:
        dev = "cpu"
        device = torch.device(dev)

    # loss, metrics and optimizer

    criterion = smp.utils.losses.DiceLoss()  # keep the loss function distinct from the per-batch loss value
    metrics = [
        smp.utils.metrics.IoU(threshold=0.5),
    ]

    optimizer = torch.optim.Adam([
        dict(params=model_3d.parameters(), lr=0.0001),  # optimize the converted 3D model
    ])

    best_val_acc = 0  # for model check pointing
    # Epoch loop
    for epoch in range(1, EPOCHS + 1):
        start_time = time.time()
        # Reset metrics
        train_loss = 0.0
        val_loss = 0.0
        train_correct = 0.0
        val_correct = 0.0

        # Training loop
        model_3d.train()
        for inputs, targets in train_gen:

            # use GPU if available
            inputs = inputs.to(device)
            targets = targets.to(device)
            inputs = inputs.float()
            targets = targets.view(-1, 1).float()

            # Training steps
            optimizer.zero_grad()  # clear gradients
            output = model_3d(inputs)  # forward pass through the converted 3D model
            loss = criterion(output, targets)  # calculate loss
            loss.backward()  # backward pass: gradients of the loss w.r.t. parameters
            optimizer.step()  # update parameters
            train_loss += loss.item() * inputs.size(0)  # update running training loss
            train_correct += ((output > 0.5) == targets).float().sum()  # update training accuracy

        # Validation loop
        model_3d.eval()
        for inputs, targets in val_gen:
            # use GPU if available
            inputs = inputs.to(device)
            targets = targets.to(device)
            inputs = inputs.float()
            targets = targets.view(-1, 1).float()

            # Validation steps
            with torch.no_grad():  # no gradient tracking needed during validation
                output = model_3d(inputs)  # forward pass through the converted 3D model
                loss = criterion(output, targets)  # calculate loss
                val_loss += loss.item() * inputs.size(0)  # update running validation loss
                val_correct += ((output > 0.5) == targets).float().sum()  # update validation accuracy

        # calculate average losses and accuracy
        train_loss = train_loss / len(train_gen.sampler)
        val_loss = val_loss / len(val_gen.sampler)
        train_acc = train_correct / len(train_gen.sampler)
        val_acc = val_correct / len(val_gen.sampler)
        end_time = time.time()  # get time taken for epoch

        # Display metrics at the end of each epoch.
        print(
            f'Epoch: {epoch} \tTraining Loss: {train_loss} \tValidation Loss: {val_loss} \tTraining Accuracy: {train_acc} \tValidation Accuracy: {val_acc} \t Time taken: {end_time - start_time}'
        )

        # Log metrics to tensorboard
        #file_writer.add_scalar('Loss/train', train_loss, epoch)
        #file_writer.add_scalar('Loss/validation', val_loss, epoch)
        #file_writer.add_scalar('Accuracy/train', train_acc, epoch)
        #file_writer.add_scalar('Accuracy/validation', val_acc, epoch)
        #file_writer.add_scalar('epoch_time', end_time - start_time, epoch)

        # checkpoint if improved
        if val_acc > best_val_acc:
            torch.save(model_3d.state_dict(), "pytorch_acsunet.pt")
            best_val_acc = val_acc