Code example #1
def test_split():
    """Checks that:
     -  split and kfold are working
     -  the loading functions can find the output
     -  no data leakage is introduced in split and kfold.
     """
    n_splits = 5
    flag_split = not os.system("clinicadl tsvtool split %s %s --age_name age" %
                               (merged_tsv, reference_path))
    flag_kfold = not os.system("clinicadl tsvtool kfold %s --n_splits %i" %
                               (path.join(reference_path, "train"), n_splits))
    assert flag_split
    assert flag_kfold
    flag_load = True
    try:
        _ = load_data_test(path.join(reference_path, "test"),
                           diagnoses.split(" "))
        for fold in range(n_splits):
            _, _ = load_data(path.join(reference_path, "train"),
                             diagnoses.split(" "),
                             fold,
                             n_splits=n_splits)
    except FileNotFoundError:
        flag_load = False
    assert flag_load

    run_test_suite(reference_path, 0, "test")
    run_test_suite(path.join(reference_path, "train"), n_splits, "validation")

    shutil.rmtree(path.join(reference_path, "train"))
    shutil.rmtree(path.join(reference_path, "test"))
Code example #2
def train_CNN_bad_data_split(params):

    # Announce the transfer-learning setup (the model itself is built per fold)
    print('Performing transfer learning with a model pretrained on ImageNet!\n')
    print('The chosen network is %s!' % params.model)

    # Most ImageNet-pretrained models expect this input size.
    trg_size = (224, 224)

    # All pre-trained models expect input images normalized in the same way:
    # mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W
    # are expected to be at least 224. The images have to be loaded into a
    # range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406]
    # and std = [0.229, 0.224, 0.225].
    transformations = transforms.Compose([
        MinMaxNormalization(),
        transforms.ToPILImage(),
        transforms.Resize(trg_size),
        transforms.ToTensor()
    ])
    params.dropout = 0.8

    total_time = time()

    if params.split is None:
        if params.n_splits is None:
            fold_iterator = range(1)
        else:
            fold_iterator = range(params.n_splits)
    else:
        fold_iterator = [params.split]

    for fi in fold_iterator:
        print("Running for the %d-th fold" % fi)

        training_sub_df, valid_sub_df = load_data(params.tsv_path,
                                                  params.diagnoses,
                                                  fi,
                                                  n_splits=params.n_splits,
                                                  baseline=params.baseline)

        # split the training + validation by slice
        training_df, valid_df = mix_slices(training_sub_df,
                                           valid_sub_df,
                                           mri_plane=params.mri_plane)

        data_train = MRIDatasetSlice(params.caps_directory,
                                     training_df,
                                     transformations=transformations,
                                     mri_plane=params.mri_plane,
                                     prepare_dl=params.prepare_dl,
                                     mixed=True)

        data_valid = MRIDatasetSlice(params.caps_directory,
                                     valid_df,
                                     transformations=transformations,
                                     mri_plane=params.mri_plane,
                                     prepare_dl=params.prepare_dl,
                                     mixed=True)

        # Build the data loaders (shuffle only the training set)
        train_loader = DataLoader(data_train,
                                  batch_size=params.batch_size,
                                  shuffle=True,
                                  num_workers=params.num_workers,
                                  pin_memory=True)

        valid_loader = DataLoader(data_valid,
                                  batch_size=params.batch_size,
                                  shuffle=False,
                                  num_workers=params.num_workers,
                                  pin_memory=True)

        # Initialize the model
        print('Initialization of the model')
        model = init_model(params.model,
                           gpu=params.gpu,
                           dropout=params.dropout)

        # Define criterion and optimizer
        criterion = torch.nn.CrossEntropyLoss()
        # Resolve the optimizer class by name (equivalent to the original
        # eval-based lookup, without evaluating arbitrary strings)
        optimizer_class = getattr(torch.optim, params.optimizer)
        optimizer = optimizer_class(
            filter(lambda x: x.requires_grad, model.parameters()),
            lr=params.learning_rate,
            weight_decay=params.weight_decay)
        setattr(params, 'beginning_epoch', 0)

        # Define output directories
        log_dir = os.path.join(params.output_dir, 'fold-%i' % fi,
                               'tensorboard_logs')
        model_dir = os.path.join(params.output_dir, 'fold-%i' % fi, 'models')

        print('Beginning the training task')
        train(model, train_loader, valid_loader, criterion, optimizer, False,
              log_dir, model_dir, params)

        test_cnn(train_loader, "train", fi, criterion, params)
        test_cnn(valid_loader, "validation", fi, criterion, params)

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)
Code example #3
    transformations = get_transforms(model_options.mode,
                                     model_options.minmaxnormalization)
    criterion = nn.CrossEntropyLoss()

    # Loop on all folds trained
    best_model_dir = os.path.join(options.model_path, 'best_model_dir')
    folds_dir = os.listdir(best_model_dir)

    for fold_dir in folds_dir:
        split = int(fold_dir[-1])
        print("Fold %i" % split)

        # Data management
        training_df, valid_df = load_data(model_options.tsv_path,
                                          model_options.diagnoses, split,
                                          model_options.n_splits,
                                          model_options.baseline)

        for cnn_index in range(num_cnn):
            data_train = return_dataset(model_options.mode,
                                        model_options.input_dir,
                                        training_df,
                                        model_options.preprocessing,
                                        transformations,
                                        model_options,
                                        cnn_index=cnn_index)
            data_valid = return_dataset(model_options.mode,
                                        model_options.input_dir,
                                        valid_df,
                                        model_options.preprocessing,
                                        transformations,
                                        model_options,
                                        cnn_index=cnn_index)
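
One caveat in this snippet: split = int(fold_dir[-1]) keeps only the last character of the directory name, so it breaks for fold indices of 10 and above. A defensive variant (an assumption about the fold_N naming, not the project's code):

def fold_index(fold_dir):
    """Extract the integer fold index from names like 'fold_3' or 'fold_12'."""
    return int(fold_dir.rsplit("_", 1)[-1])

assert fold_index("fold_3") == 3
assert fold_index("fold_12") == 12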
Code example #4
File: resume_autoencoder.py  Project: CC0624/AD-DL
def main(options):

    options = read_json(options)

    if options.evaluation_steps % options.accumulation_steps != 0 and options.evaluation_steps != 1:
        raise Exception(
            'Evaluation steps %d must be a multiple of accumulation steps %d' %
            (options.evaluation_steps, options.accumulation_steps))

    if options.minmaxnormalization:
        transformations = MinMaxNormalization()
    else:
        transformations = None

    total_time = time()

    # Get the data.
    training_tsv, valid_tsv = load_data(options.diagnosis_path,
                                        options.diagnoses, options.split,
                                        options.n_splits, options.baseline)

    data_train = MRIDataset(options.input_dir,
                            training_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)
    data_valid = MRIDataset(options.input_dir,
                            valid_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)

    # Build the data loaders (shuffle only the training set)
    train_loader = DataLoader(data_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=options.num_workers,
                              pin_memory=True,
                              drop_last=options.drop_last)

    valid_loader = DataLoader(data_valid,
                              batch_size=options.batch_size,
                              shuffle=False,
                              num_workers=options.num_workers,
                              pin_memory=True,
                              drop_last=options.drop_last)

    # Initialize the model
    print('Initialization of the model')
    decoder = create_autoencoder(options.model)

    decoder, current_epoch = load_model(decoder,
                                        options.model_path,
                                        options.gpu,
                                        'checkpoint.pth.tar',
                                        device_index=options.device)
    if options.gpu:
        device = torch.device('cuda:{}'.format(options.device))
        decoder = decoder.to(device)

    options.beginning_epoch = current_epoch + 1

    # Define criterion and optimizer
    criterion = torch.nn.MSELoss()
    optimizer_path = path.join(options.model_path, 'optimizer.pth.tar')
    optimizer = load_optimizer(optimizer_path, decoder)

    # Define output directories
    log_dir = path.join(options.output_dir, 'log_dir',
                        'fold_%i' % options.split, 'ConvAutoencoder')
    visualization_dir = path.join(options.output_dir, 'visualize',
                                  'fold_%i' % options.split)
    model_dir = path.join(options.output_dir, 'best_model_dir',
                          'fold_%i' % options.split, 'ConvAutoencoder')

    print('Resuming the training task')
    train(decoder, train_loader, valid_loader, criterion, optimizer, False,
          log_dir, model_dir, options)

    if options.visualization:
        print("Visualization of autoencoder reconstruction")
        best_decoder, _ = load_model(decoder,
                                     path.join(model_dir, "best_loss"),
                                     options.gpu,
                                     filename='model_best.pth.tar',
                                     device_index=options.device)
        visualize_image(best_decoder,
                        valid_loader,
                        path.join(visualization_dir, "validation"),
                        nb_images=3,
                        device_index=options.device)
        visualize_image(best_decoder,
                        train_loader,
                        path.join(visualization_dir, "train"),
                        nb_images=3,
                        device_index=options.device)
    del decoder
    torch.cuda.empty_cache()

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)
Code example #5
File: evaluation_test.py  Project: sourav22899/AD-DL
def main(options):

    # Initialize the model
    print('Performing transfer learning with a model pretrained on ImageNet.')

    model = create_model(options.network, options.gpu)
    # Most ImageNet-pretrained models expect this input size.
    trg_size = (224, 224)

    # All pre-trained models expect input images normalized in the same way:
    # mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W
    # are expected to be at least 224. The images have to be loaded into a
    # range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406]
    # and std = [0.229, 0.224, 0.225].
    transformations = transforms.Compose([
        MinMaxNormalization(),
        transforms.ToPILImage(),
        transforms.Resize(trg_size),
        transforms.ToTensor()
    ])
    # Define loss and optimizer
    loss = torch.nn.CrossEntropyLoss()

    if options.split is None:
        fold_iterator = range(options.n_splits)
    else:
        fold_iterator = [options.split]

    # Loop on folds
    for fi in fold_iterator:
        print("Fold %i" % fi)

        if options.dataset == 'validation':
            _, test_df = load_data(options.diagnosis_tsv_path,
                                   options.diagnoses,
                                   fi,
                                   n_splits=options.n_splits,
                                   baseline=True)
        else:
            test_df = load_data_test(options.diagnosis_tsv_path,
                                     options.diagnoses)

        data_test = MRIDataset_slice(options.caps_directory,
                                     test_df,
                                     transformations=transformations,
                                     mri_plane=options.mri_plane,
                                     prepare_dl=options.prepare_dl)

        test_loader = DataLoader(data_test,
                                 batch_size=options.batch_size,
                                 shuffle=False,
                                 num_workers=options.num_workers,
                                 pin_memory=True)

        # Load the best model saved during training
        model, best_epoch = load_model(model,
                                       os.path.join(options.output_dir,
                                                    'best_model_dir',
                                                    "fold_%i" % fi, 'CNN',
                                                    str(options.selection)),
                                       gpu=options.gpu,
                                       filename='model_best.pth.tar')

        results_df, metrics = test(model, test_loader, options.gpu, loss)
        print("Slice level balanced accuracy is %f" %
              (metrics['balanced_accuracy']))

        slice_level_to_tsvs(options.output_dir,
                            results_df,
                            metrics,
                            fi,
                            options.selection,
                            dataset=options.dataset)

        # Soft voting
        soft_voting_to_tsvs(options.output_dir,
                            fi,
                            selection=options.selection,
                            dataset=options.dataset,
                            selection_threshold=options.selection_threshold)
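
soft_voting_to_tsvs aggregates slice-level predictions to the subject level. Under the usual definition of soft voting (an assumption about what the helper computes), the per-slice class probabilities are averaged per subject before taking the argmax; a pandas sketch with hypothetical column names:

import pandas as pd

def soft_voting(slice_df):
    """slice_df: one row per slice with columns participant_id, proba0, proba1.
    Returns the winning class label per subject after averaging probabilities."""
    mean_probas = slice_df.groupby("participant_id")[["proba0", "proba1"]].mean()
    return mean_probas.idxmax(axis=1)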
Code example #6
def train_CNN_bad_data_split(params):

    # Initialize the model
    print('Performing transfer learning with a model pretrained on ImageNet!\n')
    print('The chosen network is %s!' % params.network)

    model = create_model(params.network, params.gpu)
    # Most ImageNet-pretrained models expect this input size.
    trg_size = (224, 224)

    # All pre-trained models expect input images normalized in the same way:
    # mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W
    # are expected to be at least 224. The images have to be loaded into a
    # range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406]
    # and std = [0.229, 0.224, 0.225].
    transformations = transforms.Compose([
        MinMaxNormalization(),
        transforms.ToPILImage(),
        transforms.Resize(trg_size),
        transforms.ToTensor()
    ])

    total_time = time()
    init_state = copy.deepcopy(model.state_dict())

    if params.split is None:
        fold_iterator = range(params.n_splits)
    else:
        fold_iterator = [params.split]

    for fi in fold_iterator:
        print("Running for the %d-th fold" % fi)

        training_sub_df, valid_sub_df = load_data(params.tsv_path,
                                                  params.diagnoses,
                                                  fi,
                                                  n_splits=params.n_splits,
                                                  baseline=params.baseline)

        # split the training + validation by slice
        training_df, valid_df = mix_slices(training_sub_df,
                                           valid_sub_df,
                                           mri_plane=params.mri_plane)

        data_train = MRIDataset_slice_mixed(params.caps_directory,
                                            training_df,
                                            transformations=transformations,
                                            mri_plane=params.mri_plane,
                                            prepare_dl=params.prepare_dl)

        data_valid = MRIDataset_slice_mixed(params.caps_directory,
                                            valid_df,
                                            transformations=transformations,
                                            mri_plane=params.mri_plane,
                                            prepare_dl=params.prepare_dl)

        # Build the data loaders (shuffle only the training set)
        train_loader = DataLoader(data_train,
                                  batch_size=params.batch_size,
                                  shuffle=True,
                                  num_workers=params.num_workers,
                                  pin_memory=True)

        valid_loader = DataLoader(data_valid,
                                  batch_size=params.batch_size,
                                  shuffle=False,
                                  num_workers=params.num_workers,
                                  pin_memory=True)

        # chosen optimizer for back-propagation
        optimizer_class = getattr(torch.optim, params.optimizer)
        optimizer = optimizer_class(
            filter(lambda x: x.requires_grad, model.parameters()),
            params.learning_rate,
            weight_decay=params.weight_decay)

        model.load_state_dict(init_state)

        # Cross-entropy loss (two diagnosis classes)
        loss = torch.nn.CrossEntropyLoss()

        # parameters used in training
        best_accuracy = 0.0
        best_loss_valid = np.inf

        writer_train_batch = SummaryWriter(
            log_dir=(os.path.join(params.output_dir, "log_dir", "fold_" +
                                  str(fi), "train_batch")))
        writer_train_all_data = SummaryWriter(
            log_dir=(os.path.join(params.output_dir, "log_dir", "fold_" +
                                  str(fi), "train_all_data")))

        writer_valid = SummaryWriter(
            log_dir=(os.path.join(params.output_dir, "log_dir", "fold_" +
                                  str(fi), "valid")))

        # initialize the early stopping instance
        early_stopping = EarlyStopping('min',
                                       min_delta=params.tolerance,
                                       patience=params.patience)

        for epoch in range(params.epochs):
            print("At %i-th epoch." % epoch)

            # train the model
            train_df, acc_mean_train, loss_batch_mean_train, global_step \
                = train(
                        model,
                        train_loader,
                        params.gpu,  # gpu flag, matching the two calls below
                        loss,
                        optimizer,
                        writer_train_batch,
                        epoch,
                        model_mode='train',
                        selection_threshold=params.selection_threshold
                        )

            # calculate the accuracy with the whole training data to monitor overfitting
            train_all_df, acc_mean_train_all, loss_batch_mean_train_all, _\
                = train(
                        model,
                        train_loader,
                        params.gpu,
                        loss,
                        optimizer,
                        writer_train_all_data,
                        epoch,
                        model_mode='valid',
                        selection_threshold=params.selection_threshold
                        )

            print(
                "For training, subject level balanced accuracy is %f at the end of epoch %d"
                % (acc_mean_train_all, epoch))

            # At the end of each epoch, validate the model on the validation set
            valid_df, acc_mean_valid, loss_batch_mean_valid, _ \
                = train(
                        model,
                        valid_loader,
                        params.gpu,
                        loss,
                        optimizer,
                        writer_valid,
                        epoch,
                        model_mode='valid',
                        selection_threshold=params.selection_threshold
                        )

            print(
                "For validation, subject level balanced accuracy is %f at the end of epoch %d"
                % (acc_mean_valid, epoch))

            # save the best model based on the best loss and accuracy
            acc_is_best = acc_mean_valid > best_accuracy
            best_accuracy = max(best_accuracy, acc_mean_valid)
            loss_is_best = loss_batch_mean_valid < best_loss_valid
            best_loss_valid = min(loss_batch_mean_valid, best_loss_valid)

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': model.state_dict(),
                    'loss': loss_batch_mean_valid,
                    'accuracy': acc_mean_valid,
                    'optimizer': optimizer.state_dict(),
                    'global_step': global_step
                }, acc_is_best, loss_is_best,
                os.path.join(params.output_dir, "best_model_dir",
                             "fold_" + str(fi), "CNN"))

            # try early stopping criterion
            if early_stopping.step(
                    loss_batch_mean_valid) or epoch == params.epochs - 1:
                print(
                    "Training stopped at epoch %d (early stopping criterion "
                    "met or last user-defined epoch reached)" % epoch)

                break

        # Final evaluation for all criteria
        for selection in ['best_loss', 'best_acc']:
            model, best_epoch = load_model(
                model,
                os.path.join(params.output_dir, 'best_model_dir',
                             'fold_%i' % fi, 'CNN', str(selection)),
                gpu=params.gpu,
                filename='model_best.pth.tar')

            train_df, metrics_train = test(model, train_loader, params.gpu,
                                           loss)
            valid_df, metrics_valid = test(model, valid_loader, params.gpu,
                                           loss)

            # Write subject-level results and performance metrics to TSV files.
            slice_level_to_tsvs(params.output_dir,
                                train_df,
                                metrics_train,
                                fi,
                                dataset='train',
                                selection=selection)
            slice_level_to_tsvs(params.output_dir,
                                valid_df,
                                metrics_valid,
                                fi,
                                dataset='validation',
                                selection=selection)

            soft_voting_to_tsvs(params.output_dir,
                                fi,
                                dataset='train',
                                selection=selection,
                                selection_threshold=params.selection_threshold)
            soft_voting_to_tsvs(params.output_dir,
                                fi,
                                dataset='validation',
                                selection=selection,
                                selection_threshold=params.selection_threshold)
            torch.cuda.empty_cache()

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)
Code example #7
File: evaluation_multiCNN.py  Project: basrie/AD-DL
def main(options):
    # Initialize the model
    model = create_model(options.network, options.gpu)
    transformations = transforms.Compose([MinMaxNormalization()])

    # Define loss and optimizer
    loss = torch.nn.CrossEntropyLoss()

    if options.split is None:
        fold_iterator = range(options.n_splits)
    else:
        fold_iterator = [options.split]

    # Loop on folds
    for fi in fold_iterator:
        print("Fold %i" % fi)

        if options.dataset == 'validation':
            _, test_df = load_data(options.diagnosis_tsv_path,
                                   options.diagnoses,
                                   fi,
                                   n_splits=options.n_splits,
                                   baseline=True)
        else:
            test_df = load_data_test(options.diagnosis_tsv_path,
                                     options.diagnoses)

        for n in range(options.num_cnn):

            dataset = MRIDataset_patch(options.caps_directory,
                                       test_df,
                                       options.patch_size,
                                       options.patch_stride,
                                       transformations=transformations,
                                       patch_index=n,
                                       prepare_dl=options.prepare_dl)

            test_loader = DataLoader(dataset,
                                     batch_size=options.batch_size,
                                     shuffle=False,
                                     num_workers=options.num_workers,
                                     pin_memory=True)

            # Load the best model saved during training
            model, best_epoch = load_model(
                model,
                os.path.join(options.output_dir, 'best_model_dir',
                             "fold_%i" % fi, 'cnn-%i' % n, options.selection),
                options.gpu,
                filename='model_best.pth.tar')

            results_df, metrics = test(model, test_loader, options.gpu, loss)
            print("Patch level balanced accuracy is %f" %
                  metrics['balanced_accuracy'])

            # write the test results into the tsv files
            patch_level_to_tsvs(options.output_dir,
                                results_df,
                                metrics,
                                fi,
                                options.selection,
                                dataset=options.dataset,
                                cnn_index=n)

        print("Selection threshold: ", options.selection_threshold)
        soft_voting_to_tsvs(options.output_dir,
                            fi,
                            options.selection,
                            dataset=options.dataset,
                            num_cnn=options.num_cnn,
                            selection_threshold=options.selection_threshold)
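
The examples above repeatedly report balanced accuracy, i.e. the mean of per-class recalls, which is robust to the class imbalance typical of diagnosis labels. For reference, the same quantity via scikit-learn (the project builds its own metrics dict):

from sklearn.metrics import balanced_accuracy_score

y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 1]
# class 0 recall = 1/2, class 1 recall = 3/3, mean = 0.75
print(balanced_accuracy_score(y_true, y_pred))  # 0.75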
Code example #8
def main(options):

    options = read_json(options)

    if options.evaluation_steps % options.accumulation_steps != 0 and options.evaluation_steps != 1:
        raise Exception(
            'Evaluation steps %d must be a multiple of accumulation steps %d' %
            (options.evaluation_steps, options.accumulation_steps))

    if options.minmaxnormalization:
        transformations = MinMaxNormalization()
    else:
        transformations = None

    total_time = time()

    # Get the data.
    training_tsv, valid_tsv = load_data(options.diagnosis_path,
                                        options.diagnoses, options.split,
                                        options.n_splits, options.baseline)

    data_train = MRIDataset(options.input_dir,
                            training_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)
    data_valid = MRIDataset(options.input_dir,
                            valid_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)

    # Build the data loaders (shuffle only the training set)
    train_loader = DataLoader(data_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=options.num_workers,
                              pin_memory=True,
                              drop_last=options.drop_last)

    valid_loader = DataLoader(data_valid,
                              batch_size=options.batch_size,
                              shuffle=False,
                              num_workers=options.num_workers,
                              pin_memory=True,
                              drop_last=options.drop_last)

    # Initialize the model
    print('Initialization of the model')
    if options.model in ('UNet3D', 'ResidualUNet3D', 'UNet3D_add_more_fc',
                         'ResidualUNet3D_add_more_fc'):
        # The four U-Net variants take exactly the same constructor arguments.
        print('********** init %s model for test! **********' % options.model)
        model = create_model(options.model,
                             gpu=options.gpu,
                             dropout=options.dropout,
                             device_index=options.device,
                             in_channels=options.in_channels,
                             out_channels=options.out_channels,
                             f_maps=options.f_maps,
                             layer_order=options.layer_order,
                             num_groups=options.num_groups,
                             num_levels=options.num_levels)
    elif options.model in ('VoxCNN', 'ConvNet3D'):
        print('********** init %s model for test! **********' % options.model)
        model = create_model(options.model,
                             gpu=options.gpu,
                             device_index=options.device)
    elif 'gcn' in options.model:
        print('********** init {}-{} model for test! **********'.format(
            options.model, options.gnn_type))
        model = create_model(
            options.model,
            gpu=options.gpu,
            device_index=options.device,
            gnn_type=options.gnn_type,
            gnn_dropout=options.gnn_dropout,
            gnn_dropout_adj=options.gnn_dropout_adj,
            gnn_non_linear=options.gnn_non_linear,
            gnn_undirected=options.gnn_undirected,
            gnn_self_loop=options.gnn_self_loop,
            gnn_threshold=options.gnn_threshold,
        )
    elif options.model == 'ROI_GCN':
        print('********** init ROI_GCN model for test! **********')
        model = create_model(options.model,
                             gpu=options.gpu,
                             device_index=options.device,
                             gnn_type=options.gnn_type,
                             gnn_dropout=options.gnn_dropout,
                             gnn_dropout_adj=options.gnn_dropout_adj,
                             gnn_non_linear=options.gnn_non_linear,
                             gnn_undirected=options.gnn_undirected,
                             gnn_self_loop=options.gnn_self_loop,
                             gnn_threshold=options.gnn_threshold,
                             nodel_vetor_layer=options.nodel_vetor_layer,
                             classify_layer=options.classify_layer,
                             num_node_features=options.num_node_features,
                             num_class=options.num_class,
                             roi_size=options.roi_size,
                             num_nodes=options.num_nodes,
                             gnn_pooling_layers=options.gnn_pooling_layers,
                             global_sort_pool_k=options.global_sort_pool_k,
                             layers=options.layers,
                             shortcut_type=options.shortcut_type,
                             use_nl=options.use_nl,
                             dropout=options.dropout,
                             device=options.device)
    elif options.model == 'SwinTransformer3d':
        print('********** init SwinTransformer3d model for test! **********')
        model = create_model(
            options.model,
            gpu=options.gpu,
            dropout=options.dropout,
            device_index=options.device,
            sw_patch_size=options.sw_patch_size,
            window_size=options.window_size,
            mlp_ratio=options.mlp_ratio,
            drop_rate=options.drop_rate,
            attn_drop_rate=options.attn_drop_rate,
            drop_path_rate=options.drop_path_rate,
            qk_scale=options.qk_scale,
            embed_dim=options.embed_dim,
            depths=options.depths,
            num_heads=options.num_heads,
            qkv_bias=options.qkv_bias,
            ape=options.ape,
            patch_norm=options.patch_norm,
        )
    else:
        model = create_model(options.model,
                             gpu=options.gpu,
                             dropout=options.dropout,
                             device_index=options.device)
    model_dir = path.join(options.model_path, "best_model_dir", "CNN",
                          "fold_" + str(options.split))
    model, current_epoch = load_model(model,
                                      model_dir,
                                      options.gpu,
                                      'checkpoint.pth.tar',
                                      device_index=options.device)

    options.beginning_epoch = current_epoch + 1

    # Define criterion and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer_path = path.join(options.model_path, 'optimizer.pth.tar')
    optimizer = load_optimizer(optimizer_path, model)

    # Define output directories
    log_dir = path.join(options.output_dir, 'log_dir',
                        'fold_%i' % options.split, 'CNN')
    model_dir = path.join(options.output_dir, 'best_model_dir',
                          'fold_%i' % options.split, 'CNN')

    print('Resuming the training task')
    train(model, train_loader, valid_loader, criterion, optimizer, True,
          log_dir, model_dir, options)

    options.model_path = options.output_dir
    test_cnn(train_loader, "train", options.split, criterion, options)
    test_cnn(valid_loader, "validation", options.split, criterion, options)

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)
Code example #9
def main(options):

    options = read_json(options)

    if options.evaluation_steps % options.accumulation_steps != 0 and options.evaluation_steps != 1:
        raise Exception(
            'Evaluation steps %d must be a multiple of accumulation steps %d' %
            (options.evaluation_steps, options.accumulation_steps))

    if options.minmaxnormalization:
        transformations = MinMaxNormalization()
    else:
        transformations = None

    total_time = time()

    # Get the data.
    training_tsv, valid_tsv = load_data(options.diagnosis_path,
                                        options.diagnoses, options.split,
                                        options.n_splits, options.baseline)

    data_train = MRIDataset(options.input_dir,
                            training_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)
    data_valid = MRIDataset(options.input_dir,
                            valid_tsv,
                            transform=transformations,
                            preprocessing=options.preprocessing)

    # Build the data loaders (shuffle only the training set)
    train_loader = DataLoader(data_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=options.num_workers,
                              pin_memory=True)

    valid_loader = DataLoader(data_valid,
                              batch_size=options.batch_size,
                              shuffle=False,
                              num_workers=options.num_workers,
                              pin_memory=True)

    # Initialize the model
    print('Initialization of the model')
    model = create_model(options.model, options.gpu, dropout=options.dropout)
    model_dir = path.join(options.model_path, "best_model_dir", "CNN",
                          "fold_" + str(options.split))
    model, current_epoch = load_model(model, model_dir, options.gpu,
                                      'checkpoint.pth.tar')

    options.beginning_epoch = current_epoch + 1

    # Define criterion and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer_path = path.join(options.model_path, 'optimizer.pth.tar')
    optimizer = load_optimizer(optimizer_path, model)

    # Define output directories
    log_dir = path.join(options.output_dir, 'log_dir',
                        'fold_%i' % options.split, 'CNN')
    model_dir = path.join(options.output_dir, 'best_model_dir',
                          'fold_%i' % options.split, 'CNN')

    print('Resuming the training task')
    train(model, train_loader, valid_loader, criterion, optimizer, True,
          log_dir, model_dir, options)

    options.model_path = options.output_dir
    test_single_cnn(train_loader, "train", options.split, criterion, options)
    test_single_cnn(valid_loader, "validation", options.split, criterion,
                    options)

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)