Example #1
def train(config_path, result_dir):
    config = yaml_utils.Config(
        yaml.load(open(config_path), Loader=yaml.SafeLoader))
    result_dir, pattern = make_result_dir_path(config, result_dir)

    # Setup the training
    iteration = config.expr_itr
    main_script = config.main
    gpu_num = config.gpu_num
    train_cmd = [
        # subprocess requires string arguments, so cast gpu_num explicitly
        'mpiexec', '--allow-run-as-root', '-n', str(gpu_num), 'python',
        main_script, '--config_path', config_path
    ]

    logger.info("Start Training: {} for {} iterations".format(
        pattern, iteration))
    for i, seed in enumerate(SEED[:iteration]):
        cmd = train_cmd + ['--seed', str(seed), '--process_num', str(i)]
        # Capture stderr so the failure log below has something to report
        ret = subprocess.run(cmd, stderr=subprocess.PIPE)
        if ret.returncode == 0:
            logger.info("Succ Train: Iteration no.{} of {}".format(
                i + 1, pattern))
        else:
            logger.info("Fail Train: Iteration no.{} of {}".format(
                i + 1, pattern))
            logger.info("ErrorLog: {}".format(ret.stderr))
            break

    logger.info("End Training: {} for {} iterations".format(
        pattern, iteration))
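
These driver functions lean on module-level setup that the snippets omit. A minimal sketch of what they assume follows; the seed values, logger configuration, and imports here are guesses, and make_result_dir_path and make_summary are project helpers not shown.

# Hypothetical scaffolding for the drivers in these examples; the real module
# defines its own SEED values and project helpers (make_result_dir_path, ...).
import logging
import subprocess

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# One fixed seed per experiment iteration keeps reruns reproducible.
SEED = [42, 123, 2019, 31415, 2718]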
Example #2
def test(config_path, result_dir):
    config = yaml_utils.Config(
        yaml.load(open(config_path), Loader=yaml.SafeLoader))
    result_dir, pattern = make_result_dir_path(config, result_dir)

    # Setup
    iteration = config.expr_itr
    main_script = config.main.replace('trainer', 'tester')
    test_cmd = ['python', main_script, '--config_path', config_path]

    logger.info("Start Testing: {} for {} iterations".format(
        pattern, iteration))
    for i in range(iteration):
        cmd = test_cmd + ['--process_num', str(i)]
        # Capture stderr so the failure log below has something to report
        ret = subprocess.run(cmd, stderr=subprocess.PIPE)
        if ret.returncode == 0:
            logger.info("Succ Test: Iteration no.{} of {}".format(
                i + 1, pattern))
        else:
            logger.info("Fail Test: Iteration no.{} of {}".format(
                i + 1, pattern))
            logger.info("ErrorLog: {}".format(ret.stderr))

    make_summary(result_dir, pattern)

    logger.info("End Testing: {} for {} iterations".format(pattern, iteration))
Example #3
def main():
    parser = argparse.ArgumentParser(description='Train pre')

    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/Scatter_plot.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/Scatter_plot',
                        help='Directory to output the result')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    #make output dir
    result_dir = os.path.join(args.base, args.out)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    print('----- Load dataset -----')
    # Read path to hr data and lr data
    # path_pairs = []
    # with open(os.path.join(args.base, config.dataset['training_fn'])) as paths_file:
    #     for line in paths_file:
    #         line = line.split()
    #         if not line: continue
    #         path_pairs.append(line[:])
    #
    # lr_path_csv = path_pairs[2][0]  # LR
    # hr_patch_csv = path_pairs[2][1]  # HR
    # lr_patch = pd.read_csv(os.path.join(args.root, lr_path_csv))
    # hr_patch = pd.read_csv(os.path.join(args.root, hr_patch_csv))
    lr_patch = pd.read_csv(
        "G:/experiment/input_adjast_result/for_KL/LR_1680.csv")
    hr_patch = pd.read_csv(
        "G:/experiment/input_adjast_result/for_KL/HR_1680.csv")

    # describe() only returns a summary frame; print it so the stats show up
    print(lr_patch.describe())
    print(hr_patch.describe())

    #plot
    plt.scatter(lr_patch['std'], lr_patch['mean'], c='blue')
    plt.scatter(hr_patch['std'], hr_patch['mean'], c='red')

    plt.ylabel('mean')
    plt.xlabel('std')
    plt.show()
Example #4
def main():
    parser = argparse.ArgumentParser(description='Train pre')

    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/cutting_position.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/cutting_position',
                        help='Directory to output the result')
    parser.add_argument('--margin',
                        '-m',
                        default=5,
                        help='patch margin (ratio)')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    #image and mask path
    image_path = 'train/denoising/MicroCT.mhd'
    mask_path = 'train/calc_mask.mhd'

    #make output dir
    result_dir = os.path.join(args.base, args.out)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    print('----- Load data -----')

    sitkhr = sitk.ReadImage(os.path.join(args.root, image_path))
    hr = sitk.GetArrayFromImage(sitkhr).astype("float32")
    sitkmask = sitk.ReadImage(os.path.join(args.root, mask_path))
    calc_mask = sitk.GetArrayFromImage(sitkmask).astype("float32")

    # adjust: crop both volumes to the region used for training
    train = hr[:, 0:1210, :]
    mask = calc_mask[:, 0:1210, :]

    print('----- Loop start -----')

    hr_patch = []
    lr_patch = []
    for i in range(config.num['number']):
        i = i + config.num['start_position']

        # cut train and mask
        cut_hr, cut_lr = np.split(train, [i], 0)
        hr_mask, lr_mask = np.split(mask, [i], 0)
Example #5
def expr(config_path, result_dir):
    reporter = SlackWrapper()

    config = yaml_utils.Config(
        yaml.load(open(config_path), Loader=yaml.SafeLoader))
    result_dir, pattern = make_result_dir_path(config, result_dir)

    # Setup the training
    iteration = config.expr_itr
    main_script = config.main
    gpu_num = config.gpu_num
    train_cmd = [
        # subprocess requires string arguments, so cast gpu_num explicitly
        'mpiexec', '--allow-run-as-root', '-n', str(gpu_num), 'python',
        main_script, '--config_path', config_path
    ]
    test_cmd = [
        'python',
        main_script.replace('trainer', 'tester'), '--config_path', config_path
    ]

    fail_flag = False

    logger.info("Start: {} for {} iterations".format(pattern, iteration))
    for i, seed in enumerate(SEED[:iteration]):
        # Training
        cmd = train_cmd + ['--seed', str(seed), '--process_num', str(i)]
        # Capture stderr so the failure log below has something to report
        ret = subprocess.run(cmd, stderr=subprocess.PIPE)
        if ret.returncode == 0:
            logger.info("Succ Train: Iteration no.{} of {}".format(
                i + 1, pattern))
        else:
            fail_flag = True
            logger.info("Fail Train: Iteration no.{} of {}".format(
                i + 1, pattern))
            logger.info("ErrorLog: {}".format(ret.stderr))
            break

        # Testing
        cmd = test_cmd + ['--process_num', str(i)]
        ret = subprocess.run(cmd, stderr=subprocess.PIPE)
        if ret.returncode == 0:
            logger.info("Succ Test: Iteration no.{} of {}".format(
                i + 1, pattern))
        else:
            logger.info("Fail Test: Iteration no.{} of {}".format(
                i + 1, pattern))
            logger.info("ErrorLog: {}".format(ret.stderr))

    if fail_flag:
        msg = "{} was failed while the training.".format(pattern)
        logger.info(msg)
        reporter.report_fail(msg)
    else:
        summary = make_summary(result_dir, pattern)
        logger.info("End: {} for {} iterations".format(pattern, iteration))
        reporter.report_summary(summary)
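
SlackWrapper is not shown in the snippet. A plausible sketch under the assumption that it posts to a Slack incoming webhook; the class body, the SLACK_WEBHOOK_URL variable, and the message formats are all guesses.

import json
import os
import urllib.request

class SlackWrapper:
    """Hypothetical reporter: post experiment status to a Slack webhook."""

    def __init__(self, webhook_url=None):
        # SLACK_WEBHOOK_URL is an assumed environment variable
        self.url = webhook_url or os.environ['SLACK_WEBHOOK_URL']

    def _post(self, text):
        data = json.dumps({'text': text}).encode('utf-8')
        req = urllib.request.Request(
            self.url, data=data,
            headers={'Content-Type': 'application/json'})
        urllib.request.urlopen(req)

    def report_fail(self, msg):
        self._post(':x: ' + msg)

    def report_summary(self, summary):
        self._post(':white_check_mark: {}'.format(summary))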
Example #6
def main():
    parser = argparse.ArgumentParser(description='Preprocessing for 3d-unet')
    parser.add_argument('--root', '-R', default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')
    parser.add_argument('--config_path', type=str, default='configs/base.yml',
                        help='path to config file')

    parser.add_argument('--training_list', default='configs/training_list.txt',
                        help='Path to training image list file')
    parser.add_argument('--validation_list', default='configs/validation_list.txt',
                        help='Path to validation image list file')

    parser.add_argument('--training_coordinate_list', type=str, default='configs/training_coordinate_list.csv')
    parser.add_argument('--validation_coordinate_list', type=str, default='configs/validation_coordinate_list.csv')
    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(os.path.dirname(__file__), args.config_path)),
                  Loader=yaml.SafeLoader))

    def read_data_list_txt(data_list_txt):
        path_pairs = []
        with open(data_list_txt) as paths_file:
            for line in paths_file:
                line = line.split()
                if not line: continue
                path_pairs.append(line[:])
        return path_pairs

    paths = read_data_list_txt(args.training_list)

    for i in paths:
        org = IO.read_mhd_and_raw(os.path.join(args.root, 'data', i[0]))
        make_coordinate_csv(out_dir=args.root,
                            csv_name=args.training_coordinate_list,
                            input_size=[org.shape[2], org.shape[1], org.shape[0]],
                            interval=[config.patch['interval']] * 3,
                            patch_side=[config.patch['patchside']] * 3)


    paths = read_data_list_txt(args.validation_list)

    for i in paths:
        org = IO.read_mhd_and_raw(os.path.join(args.root, 'data', i[0]))
        make_coordinate_csv(out_dir=args.root,
                            csv_name=args.validation_coordinate_list,
                            input_size=[org.shape[2], org.shape[1], org.shape[0]],
                            interval=[config.patch['interval']] * 3,
                            patch_side=[config.patch['patchside']] * 3)
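
make_coordinate_csv itself is not included. A minimal sketch under the assumption that it appends the (x, y, z) origin of every patch that fits in the volume, stepping by the configured interval:

import csv
import os

def make_coordinate_csv(out_dir, csv_name, input_size, interval, patch_side):
    # Hypothetical sketch: enumerate patch origins on a regular grid.
    # input_size, interval and patch_side are [x, y, z] triples.
    with open(os.path.join(out_dir, csv_name), 'a', newline='') as f:
        writer = csv.writer(f)
        for z in range(0, input_size[2] - patch_side[2] + 1, interval[2]):
            for y in range(0, input_size[1] - patch_side[1] + 1, interval[1]):
                for x in range(0, input_size[0] - patch_side[0] + 1, interval[0]):
                    writer.writerow([x, y, z])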
Example #7
def main():
    schema = 'filename\tTop1\tTop5\tPrecision\tRecall\tF-score'
    parser = argparse.ArgumentParser(
        description='Target Model Tester \n ({})'.format(schema))
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--results_dir',
                        type=str,
                        default='./result/',
                        help='directory to save the results to')
    parser.add_argument('--batchsize',
                        type=int,
                        default=128,
                        help='Batchsize for testing')
    parser.add_argument('--process_num', type=int, default=0)
    parser.add_argument('--seed', type=int, default=42)

    args = parser.parse_args()
    config = yaml_utils.Config(
        yaml.load(open(args.config_path), Loader=yaml.SafeLoader))
    pattern = "-".join([
        config.pattern, config.models['classifier']['name'],
        config.dataset['dataset_name']
    ])
    out_path = args.results_dir + '/' + pattern

    # Model
    model_path = out_path + '/classifier{}.npz'.format(args.process_num)
    model = load_pretrained_models(config.models['classifier'], model_path)

    # Dataset
    test_dataset = yaml_utils.load_dataset(config, test=True)
    test_itr = chainer.iterators.SerialIterator(test_dataset,
                                                args.batchsize,
                                                repeat=False)

    chainer.cuda.get_device_from_id(0).use()
    model.to_gpu()  # Copy the model to the GPU

    xp = model.xp

    pred_labels = []
    correct_labels = []
    with chainer.using_config('train', False):
        for batch in test_itr:
            batchsize = len(batch)
            images = [batch[i][0] for i in range(batchsize)]
            labels = [batch[i][1] for i in range(batchsize)]
            x = xp.array(images)
            result = model(x).data
            pred_labels.append(chainer.cuda.to_cpu(result))
            correct_labels.append(np.array(labels))

    pred_labels = np.concatenate(pred_labels)
    correct_labels = np.concatenate(correct_labels)
    top1 = F.mean(F.accuracy(pred_labels, correct_labels)).data
    top5 = calc_top5_acc(pred_labels, correct_labels)
    precision, recall, Fscore, _ = F.classification_summary(
        pred_labels, correct_labels)
    out_results = {
        'test_{}'.format(args.process_num): {
            'accuracy': float(top1),
            'top-5 accuracy': float(top5),
            'precision': float(F.mean(precision).data),
            'recall': float(F.mean(recall).data),
            'f-score': float(F.mean(Fscore).data)
        }
    }

    result_path = out_path + '/test_result.yaml'
    if os.path.exists(result_path):
        result_yaml = yaml.load(open(result_path, 'r+'),
                                Loader=yaml.SafeLoader)
    else:
        result_yaml = {}
    result_yaml.update(out_results)
    with open(result_path, mode='w') as f:
        f.write(yaml.dump(result_yaml, default_flow_style=False))

    print('{}\t{}\t{}\t{}\t{}\t{}'.format(pattern, top1, top5,
                                          F.mean(precision).data,
                                          F.mean(recall).data,
                                          F.mean(Fscore).data))
    return out_results
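
calc_top5_acc is referenced but not defined in the snippet. A straightforward NumPy sketch of what it presumably computes; the name and signature match the call above, but the body is an assumption.

import numpy as np

def calc_top5_acc(pred_scores, correct_labels):
    # A prediction counts as correct if the true label is among the
    # five highest-scoring classes for that sample.
    top5 = np.argsort(pred_scores, axis=1)[:, -5:]
    hits = (top5 == np.asarray(correct_labels)[:, np.newaxis]).any(axis=1)
    return hits.mean()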
Example #8
def main():
    parser = argparse.ArgumentParser(description='Train pre')

    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/Scatter_plot.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/Scatter_plot',
                        help='Directory to output the result')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    #make output dir
    result_dir = os.path.join(args.base, args.out)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    print('----- Load dataset -----')
    # Read path to hr data and lr data
    path_pairs = []
    with open(os.path.join(args.base,
                           config.dataset['training_fn'])) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            path_pairs.append(line[:])

    coordinate_csv_path = path_pairs[0][0]  # LR
    coordinate_csv_path2 = path_pairs[0][1]  # HR
    coordinate = pd.read_csv(os.path.join(args.root, coordinate_csv_path),
                             names=("x", "y", "z")).values.tolist()
    coordinate2 = pd.read_csv(os.path.join(args.root, coordinate_csv_path2),
                              names=("x", "y", "z")).values.tolist()

    # data load
    sitklr = sitk.ReadImage(os.path.join(args.root, path_pairs[1][0]))
    lr = sitk.GetArrayFromImage(sitklr).astype("float32")
    sitkhr = sitk.ReadImage(os.path.join(args.root, path_pairs[1][1]))
    hr = sitk.GetArrayFromImage(sitkhr).astype("float32")

    print('----- data load done  -----')
    print('-----  start  -----')

    for i in range(len(coordinate)):
        x, y, z = coordinate[i]
        x_s, x_e = x, x + config.patch['patchside']
        y_s, y_e = y, y + config.patch['patchside']
        z_s, z_e = z, z + config.patch['patchside']

        #patch image
        patch_image = lr[z_s:z_e, y_s:y_e, x_s:x_e]

        #calc std and mean
        Std_lr = np.std(patch_image)
        mean_lr = np.mean(patch_image)

        df = pd.DataFrame({
            'x': [x],
            'y': [y],
            'z': [z],
            'std': [Std_lr],
            'mean': [mean_lr]
        })
        # Write the header only with the first row; mode='a' appends the rest
        df.to_csv('{}/lr_std_mean.csv'.format(result_dir),
                  index=False,
                  header=(i == 0),
                  encoding='utf-8',
                  mode='a')

    print('-----  LR done  -----')

    for j in range(len(coordinate2)):
        x, y, z = coordinate2[j]
        x_s, x_e = x, x + config.patch['patchside']
        y_s, y_e = y, y + config.patch['patchside']
        z_s, z_e = z, z + config.patch['patchside']

        #patch image
        patch_image2 = hr[z_s:z_e, y_s:y_e, x_s:x_e]

        #calc std and mean
        Std_hr = np.std(patch_image2)
        mean_hr = np.mean(patch_image2)
        df = pd.DataFrame({
            'x': [x],
            'y': [y],
            'z': [z],
            'std': [Std_hr],
            'mean': [mean_hr]
        })
        # Write the header only with the first row of hr_std_mean.csv
        df.to_csv('{}/hr_std_mean.csv'.format(result_dir),
                  index=False,
                  header=(j == 0),
                  encoding='utf-8',
                  mode='a')

    print('-----  HR done  -----')
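
Reopening the CSV for every patch works but is slow and easy to get wrong, as the header handling above shows. A sketch of an alternative that collects rows in memory and writes each file once; patch_stats is a made-up name, and the column names match the loops above.

import numpy as np
import pandas as pd

def patch_stats(volume, coordinates, side):
    # Collect one row of std/mean per patch, then write in a single call.
    rows = []
    for x, y, z in coordinates:
        patch = volume[z:z + side, y:y + side, x:x + side]
        rows.append({'x': x, 'y': y, 'z': z,
                     'std': float(np.std(patch)),
                     'mean': float(np.mean(patch))})
    return pd.DataFrame(rows)

# Usage, with the variables from the example:
# patch_stats(lr, coordinate, config.patch['patchside']).to_csv(
#     '{}/lr_std_mean.csv'.format(result_dir), index=False)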
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--results_dir',
                        type=str,
                        default='./result/',
                        help='directory to save the results to')
    parser.add_argument('--resume',
                        type=str,
                        default='',
                        help='path to the snapshot')
    parser.add_argument('--process_num', type=int, default=0)
    parser.add_argument('--seed', type=int, default=42)

    args = parser.parse_args()
    config = yaml_utils.Config(
        yaml.load(open(args.config_path), Loader=yaml.SafeLoader))
    pattern = "-".join([
        config.pattern, config.models['classifier']['name'],
        config.dataset['dataset_name']
    ])
    comm = chainermn.create_communicator()
    device = comm.intra_rank

    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(MPI.COMM_WORLD.Get_size()))
        print('Num Minibatch-size: {}'.format(config.batchsize))
        print('Num Epoch: {}'.format(config.epoch))
        print('==========================================')

    # Model
    classifier = load_models(config.models['classifier'])

    if args.resume:
        print("Resume training with snapshot:{}".format(args.resume))
        chainer.serializers.load_npz(args.resume, classifier)

    chainer.cuda.get_device_from_id(device).use()
    classifier.to_gpu()
    # models = {"classifier": classifier}

    # Optimizer
    opt = make_optimizer(classifier, comm, config)
    opt.add_hook(chainer.optimizer.WeightDecay(5e-4))

    # Dataset
    if comm.rank == 0:
        dataset = yaml_utils.load_dataset(config)
        first_size = int(len(dataset) * config.train_val_split_ratio)
        train, val = chainer.datasets.split_dataset_random(dataset,
                                                           first_size,
                                                           seed=args.seed)
    else:
        yaml_utils.load_module(config.dataset['dataset_func'],
                               config.dataset['dataset_name'])
        train, val = None, None

    train = chainermn.scatter_dataset(train, comm)
    val = chainermn.scatter_dataset(val, comm)

    # Iterator
    train_iterator = chainer.iterators.SerialIterator(train, config.batchsize)
    val_iterator = chainer.iterators.SerialIterator(val,
                                                    config.batchsize,
                                                    repeat=False,
                                                    shuffle=False)
    kwargs = config.updater['args'] if 'args' in config.updater else {}
    kwargs.update({
        'classifier': classifier,
        'iterator': train_iterator,
        'optimizer': opt,
        'device': device,
    })

    # Updater
    updater = yaml_utils.load_updater_class(config)
    updater = updater(**kwargs)
    out = args.results_dir + '/' + pattern

    if comm.rank == 0:
        create_result_dir(out, args.config_path, config)

    # Trainer
    trainer = training.Trainer(updater, (config.epoch, 'epoch'), out=out)

    # Evaluator
    evaluator = ClassifierEvaluator(val_iterator, classifier, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator)

    # Learning Rate Schedule (fixed)
    schedule = [config.epoch * 0.3, config.epoch * 0.6, config.epoch * 0.8]
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=ManualScheduleTrigger(schedule, 'epoch'))

    report_keys = [
        'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
        'validation/main/accuracy', 'elapsed_time'
    ]
    if comm.rank == 0:
        # Set up logging
        trainer.extend(extensions.snapshot_object(
            classifier, 'classifier{}.npz'.format(args.process_num)),
                       trigger=MaxValueTrigger('validation/main/accuracy'))
        trainer.extend(
            extensions.LogReport(keys=report_keys,
                                 trigger=(config.display_interval, 'epoch')))
        trainer.extend(extensions.PrintReport(report_keys),
                       trigger=(config.display_interval, 'epoch'))
        trainer.extend(
            extensions.ProgressBar(
                update_interval=config.progressbar_interval))
    # Run the training
    trainer.run()
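
make_optimizer is not shown. In the ChainerMN pattern this example follows, it typically wraps a plain optimizer in a multi-node one so gradients are all-reduced across workers; a sketch under that assumption (the optimizer choice and initial learning rate are guesses):

import chainer
import chainermn

def make_optimizer(model, comm, config):
    # Hypothetical: the ExponentialShift('lr', ...) extension above implies
    # an optimizer with an `lr` attribute, e.g. MomentumSGD.
    opt = chainer.optimizers.MomentumSGD(lr=0.1, momentum=0.9)
    opt = chainermn.create_multi_node_optimizer(opt, comm)
    opt.setup(model)
    return opt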
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/prediction',
                        help='Directory to output the result')

    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Load model data(snapshot)')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')
    parser.add_argument('--test_list',
                        default='configs/test_list.txt',
                        help='Path to test image list file')
    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    print('')

    unet = UNet3D(config.unet['number_of_label'])
    chainer.serializers.load_npz(args.model, unet)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        unet.to_gpu()
    xp = unet.xp

    # Read test list
    path_pairs = []
    with open(os.path.join(args.base, args.test_list)) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            path_pairs.append(line[:])

    for i in path_pairs:
        print('   Org   from: {}'.format(i[0]))
        print('   label from: {}'.format(i[1]))
        sitkOrg = sitk.ReadImage(os.path.join(args.root, 'data', i[0]))
        org = sitk.GetArrayFromImage(sitkOrg).astype("float32")

        # Calculate maximum of number of patch at each side
        ze, ye, xe = org.shape
        xm = int(math.ceil((float(xe) / float(config.patch['patchside']))))
        ym = int(math.ceil((float(ye) / float(config.patch['patchside']))))
        zm = int(math.ceil((float(ze) / float(config.patch['patchside']))))

        margin = ((0, config.patch['patchside']),
                  (0, config.patch['patchside']),
                  (0, config.patch['patchside']))
        org = np.pad(org, margin, 'edge')
        org = chainer.Variable(
            xp.array(org[np.newaxis, np.newaxis, :], dtype=xp.float32))

        prediction_map = np.zeros(
            (ze + config.patch['patchside'], ye + config.patch['patchside'],
             xe + config.patch['patchside']))
        probability_map = np.zeros(
            (config.unet['number_of_label'], ze + config.patch['patchside'],
             ye + config.patch['patchside'], xe + config.patch['patchside']))

        # Patch loop
        for s in range(xm * ym * zm):
            xi = (s % xm) * config.patch['patchside']
            yi = ((s % (ym * xm)) // xm) * config.patch['patchside']
            zi = (s // (ym * xm)) * config.patch['patchside']
            # Extract patch from original image
            patch = org[:, :, zi:zi + config.patch['patchside'],
                        yi:yi + config.patch['patchside'],
                        xi:xi + config.patch['patchside']]
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                probability_patch = unet(patch)

            # Generate probability map
            probability_patch = probability_patch.data
            if args.gpu >= 0:
                probability_patch = chainer.cuda.to_cpu(probability_patch)
            ps = config.patch['patchside']
            for ch in range(probability_patch.shape[1]):
                probability_map[ch, zi:zi + ps, yi:yi + ps,
                                xi:xi + ps] = probability_patch[0, ch, :, :, :]

            prediction_patch = np.argmax(probability_patch, axis=1)

            prediction_map[zi:zi + ps, yi:yi + ps,
                           xi:xi + ps] = prediction_patch[0, :, :, :]

        print('Save image')
        probability_map = probability_map[:, :ze, :ye, :xe]
        prediction_map = prediction_map[:ze, :ye, :xe]

        # Save prediction map
        imagePrediction = sitk.GetImageFromArray(prediction_map)
        imagePrediction.SetSpacing(sitkOrg.GetSpacing())
        imagePrediction.SetOrigin(sitkOrg.GetOrigin())
        result_dir = os.path.join(args.base, args.out, os.path.dirname(i[0]))
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        fn = os.path.splitext(os.path.basename(i[0]))[0]
        sitk.WriteImage(imagePrediction, '{}/{}.mhd'.format(result_dir, fn))

        # Save probability map
        for ch in range(probability_map.shape[0]):
            imageProbability = sitk.GetImageFromArray(probability_map[ch, :])
            imageProbability.SetSpacing(sitkOrg.GetSpacing())
            imageProbability.SetOrigin(sitkOrg.GetOrigin())
            sitk.WriteImage(
                imageProbability,
                '{}/{}_probability_{}.mhd'.format(result_dir, fn, ch))
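
The patch loop above linearizes the 3-D grid: x varies fastest, then y, then z. A quick standalone check of the index decomposition for a 2x2x2 grid with patch side 4:

xm = ym = zm = 2   # patches per side
side = 4           # stands in for config.patch['patchside']
for s in range(xm * ym * zm):
    xi = (s % xm) * side
    yi = ((s % (ym * xm)) // xm) * side
    zi = (s // (ym * xm)) * side
    print(s, (xi, yi, zi))
# 0 (0, 0, 0)  1 (4, 0, 0)  2 (0, 4, 0)  3 (4, 4, 0)
# 4 (0, 0, 4)  5 (4, 0, 4)  6 (0, 4, 4)  7 (4, 4, 4)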
Example #11
def main():
    parser = argparse.ArgumentParser(description='Train 3D-Unet')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='Results_trM1_ValiM2',
                        help='Directory to output the result')

    parser.add_argument('--model',
                        '-m',
                        default='UNet3D_200.npz',
                        help='Load model data')
    parser.add_argument('--resume',
                        '-res',
                        default='',
                        help='Resume the training from snapshot')

    parser.add_argument('--test_list',
                        default='configs/M3.txt',
                        help='Path to training image list file')

    parser.add_argument('--test_coordinate_list',
                        type=str,
                        default='configs/test_coordinate_nopad52_1.csv')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(
            open(os.path.join(os.path.dirname(__file__), args.config_path)),
            Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    print('')

    test = UnetDataset(args.root, args.test_list, args.test_coordinate_list,
                       config.patch['patchside'])
    unet = UNet3D(2)

    if args.gpu >= 0:
        unet.to_gpu()
    else:
        unet.to_cpu()

    chainer.serializers.load_npz(os.path.join(args.root, args.out, args.model),
                                 unet)

    coordi = pd.read_csv(os.path.join(args.root, args.test_coordinate_list),
                         names=("x", "y", "z")).values.tolist()
    out_side = 52
    ResultOut = np.zeros((860, 544, 544), dtype=np.uint8)

    for index in range(len(test)):
        t, x = test[index]
        x = x[np.newaxis, :]
        if args.gpu >= 0:
            x = cp.array(x)  # move the patch to GPU memory only when a GPU is used
        print(x.shape)
        print(index)
        with chainer.using_config('train', False), chainer.using_config(
                'enable_backprop', False):
            y = unet(x)
        y = F.softmax(y).data
        pred_label = np.squeeze(to_cpu(y.argmax(axis=1)))

        x, y, z = coordi[index]
        x_s, x_e = (x - int(out_side / 2)), (x + int(out_side / 2))
        y_s, y_e = (y - int(out_side / 2)), (y + int(out_side / 2))
        z_s, z_e = (z - int(out_side / 2)), (z + int(out_side / 2))

        ResultOut[z_s:z_e, y_s:y_e, x_s:x_e] = pred_label

    io.save_raw(ResultOut, os.path.join(args.root, args.out,
                                        "TestResultM3.raw"), np.uint8)
    print("Test done")
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='../../configs/training.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='../../work/visualize_hidden_layer',
                        help='Directory to output the result')

    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Load model data(snapshot)')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    parser.add_argument('--save_flag',
                        type=bool,
                        default=False,
                        help='Decision flag whether to save criteria image')
    parser.add_argument('--filename_arg',
                        type=str,
                        default='val_fn',
                        help='Which do you want to use val_fn or test_fn')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    LR_PATCH_SIDE = HR_PATCH_SIDE = config.patch['patchside']
    LR_PATCH_SIZE, HR_PATCH_SIZE = int(LR_PATCH_SIDE**3), int(HR_PATCH_SIDE**3)
    LR_MIN, LR_MAX = config.patch['lrmin'], config.patch['lrmax']
    print('HR PATCH SIZE: {}'.format(HR_PATCH_SIZE))
    print('LR PATCH SIZE: {}'.format(LR_PATCH_SIZE))
    print('')

    gen = Generator()
    chainer.serializers.load_npz(args.model, gen)
    if args.gpu >= 0:
        chainer.backends.cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1GB
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
    xp = gen.xp

    def create_result_dir(base_dir, output_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        result_dir = os.path.join(base_dir, output_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(os.path.join(base_dir, config_path), result_dir)

        copy_to_result_dir(
            os.path.join(base_dir, '../..', config.network['fn']), result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, '../..', config.updater['fn']), result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, '../..', config.dataset[args.filename_arg]),
            result_dir)

    create_result_dir(args.base, args.out, args.config_path, config)

    # Read data
    path_pairs = []
    with open(
            os.path.join(args.base, '../..',
                         config.dataset[args.filename_arg])) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            path_pairs.append(line[:])

    print(' Inference start')
    for i in path_pairs:
        print('    Tri from: {}'.format(i[0]))
        print('    Org from: {}'.format(i[1]))
        #Read data and reshape
        sitkTri = sitk.ReadImage(os.path.join(args.root, i[0]))
        tri = sitk.GetArrayFromImage(sitkTri).astype("float32")
        tri = (tri - LR_MIN) / (LR_MAX - LR_MIN)
        # Calculate maximum of number of patch at each side
        ze, ye, xe = tri.shape
        xm = int(math.ceil((float(xe) / float(config.patch['interval']))))
        ym = int(math.ceil((float(ye) / float(config.patch['interval']))))
        zm = int(math.ceil((float(ze) / float(config.patch['interval']))))

        margin = ((0, config.patch['patchside']),
                  (0, config.patch['patchside']),
                  (0, config.patch['patchside']))
        tri = np.pad(tri, margin, 'edge')
        tri = chainer.Variable(
            xp.array(tri[np.newaxis, np.newaxis, :], dtype=xp.float32))

        ch = 4
        sc_map = np.zeros(
            (ch, ze + config.patch['patchside'],
             ye + config.patch['patchside'], xe + config.patch['patchside']))
        res_map = np.zeros(
            (ch, ze + config.patch['patchside'],
             ye + config.patch['patchside'], xe + config.patch['patchside']))
        overlap_count = np.zeros(
            (sc_map.shape[1], sc_map.shape[2], sc_map.shape[3]))

        # Patch loop
        print('     #Patches {}'.format(xm * ym * zm))
        for s in range(xm * ym * zm):
            xi = (s % xm) * config.patch['interval']
            yi = ((s % (ym * xm)) // xm) * config.patch['interval']
            zi = (s // (ym * xm)) * config.patch['interval']
            # Extract patch from original image
            patch = tri[:, :, zi:zi + config.patch['patchside'],
                        yi:yi + config.patch['patchside'],
                        xi:xi + config.patch['patchside']]
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                sc, res = gen.get_hidden_layer(patch)

            # Generate probability map
            sc = sc.array
            res = res.array
            if args.gpu >= 0:
                sc = chainer.backends.cuda.to_cpu(sc)
                res = chainer.backends.cuda.to_cpu(res)

            sc_map[:, zi:zi + config.patch['patchside'],
                   yi:yi + config.patch['patchside'],
                   xi:xi + config.patch['patchside']] += np.squeeze(sc)
            res_map[:, zi:zi + config.patch['patchside'],
                    yi:yi + config.patch['patchside'],
                    xi:xi + config.patch['patchside']] += np.squeeze(res)
            overlap_count[zi:zi + config.patch['patchside'],
                          yi:yi + config.patch['patchside'],
                          xi:xi + config.patch['patchside']] += 1

        print('     Save image')
        sc_map = sc_map[:, :ze, :ye, :xe]
        res_map = res_map[:, :ze, :ye, :xe]
        overlap_count = overlap_count[:ze, :ye, :xe]
        sc_map /= overlap_count
        res_map /= overlap_count

        # Save prediction map
        result_dir = os.path.join(args.base, args.out)
        sc_dir = '{}/sc'.format(result_dir)
        res_dir = '{}/res'.format(result_dir)
        if not os.path.exists(sc_dir):
            os.makedirs(sc_dir)
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)

        sc_ims = []
        res_ims = []
        for loop in range(sc_map.shape[0]):
            fn = os.path.splitext(os.path.basename(i[0]))[0]

            scImage = sitk.GetImageFromArray(sc_map[loop, ...])
            scImage.SetSpacing(sitkTri.GetSpacing())
            scImage.SetOrigin(sitkTri.GetOrigin())
            sitk.WriteImage(scImage,
                            '{}/{}_{:04d}.mhd'.format(sc_dir, fn, loop))

            resImage = sitk.GetImageFromArray(res_map[loop, ...])
            resImage.SetSpacing(sitkTri.GetSpacing())
            resImage.SetOrigin(sitkTri.GetOrigin())
            sitk.WriteImage(resImage,
                            '{}/{}_{:04d}.mhd'.format(res_dir, fn, loop))

            # Rescale for visualization
            nd_sc = sitk.GetArrayFromImage(
                sitk.Cast(sitk.RescaleIntensity(scImage),
                          sitk.sitkUInt8))[240, 30:130, 80:180]
            nd_res = sitk.GetArrayFromImage(
                sitk.Cast(sitk.RescaleIntensity(resImage),
                          sitk.sitkUInt8))[240, 30:130, 80:180]
            sc_ims.append(nd_sc)
            res_ims.append(nd_res)

        sc_ims = np.asarray(sc_ims).transpose(1, 0, 2).reshape((100, -1))
        res_ims = np.asarray(res_ims).transpose(1, 0, 2).reshape((100, -1))
        ims = np.vstack((sc_ims, res_ims))
        preview_path = '{}/preview.png'.format(result_dir)
        figure = plt.figure()
        plt.imshow(ims, cmap='gray', interpolation='none')
        plt.axis('off')
        plt.savefig(preview_path, dpi=300)
        with open('{}/preview.pickle'.format(result_dir), 'wb') as f:
            pickle.dump(figure, f)

        plt.show()

    print(' Inference done')
Example #13
def main():
    parser = argparse.ArgumentParser(description='Train CVAE')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/training.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/training',
                        help='Directory to output the result')

    parser.add_argument('--model', '-m', default='', help='Load model data')
    parser.add_argument('--resume',
                        '-res',
                        default='',
                        help='Resume the training from snapshot')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(config.batchsize))
    print('# iteration: {}'.format(config.iteration))
    print('Learning Rate: {}'.format(config.adam['alpha']))
    print('')

    #load the dataset
    print('----- Load dataset -----')
    train = CvaeDataset(args.root,
                        os.path.join(args.base, config.dataset['training_fn']),
                        config.patch['patchside'],
                        [config.patch['lrmin'], config.patch['lrmax']],
                        augmentation=True)
    train_iter = chainer.iterators.SerialIterator(train,
                                                  batch_size=config.batchsize)
    val = CvaeDataset(args.root,
                      os.path.join(args.base, config.dataset['val_fn']),
                      config.patch['patchside'],
                      [config.patch['lrmin'], config.patch['lrmax']],
                      augmentation=False)
    val_iter = chainer.iterators.SerialIterator(val,
                                                batch_size=config.batchsize,
                                                repeat=False,
                                                shuffle=False)

    print('----- Set up model ------')
    avg_elbo_loss = AvgELBOLoss()

    if args.gpu >= 0:
        chainer.backends.cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1GB
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        avg_elbo_loss.to_gpu(args.gpu)

    print('----- Make optimizer -----')

    def make_optimizer(model, alpha=0.00001, beta1=0.9, beta2=0.999):
        optimizer = chainer.optimizers.Adam(alpha=alpha,
                                            beta1=beta1,
                                            beta2=beta2)
        optimizer.setup(model)
        return optimizer

    gen_opt = make_optimizer(model=avg_elbo_loss,
                             alpha=config.adam['alpha'],
                             beta1=config.adam['beta1'],
                             beta2=config.adam['beta2'])

    print('----- Make updater -----')
    updater = training.updaters.StandardUpdater(iterator=train_iter,
                                                optimizer=gen_opt,
                                                device=args.gpu,
                                                loss_func=avg_elbo_loss)

    print('----- Save configs -----')

    def create_result_dir(base_dir, output_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        result_dir = os.path.join(base_dir, output_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(os.path.join(base_dir, config_path), result_dir)

        copy_to_result_dir(os.path.join(base_dir, config.network['fn']),
                           result_dir)
        copy_to_result_dir(os.path.join(base_dir, config.dataset['fn']),
                           result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, config.dataset['training_fn']), result_dir)

    create_result_dir(args.base, args.out, args.config_path, config)

    print('----- Make trainer -----')
    trainer = training.Trainer(updater, (config.iteration, 'iteration'),
                               out=os.path.join(args.base, args.out))

    print('----- Set up logging -----')
    snapshot_interval = (config.snapshot_interval, 'iteration')
    display_interval = (config.display_interval, 'iteration')
    evaluation_interval = (config.evaluation_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        avg_elbo_loss.encoder,
        filename='encoder_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        avg_elbo_loss.decoder,
        filename='decoder_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=display_interval))
    # Evaluator
    trainer.extend(CvaeEvaluator(val_iter, avg_elbo_loss, device=args.gpu),
                   trigger=evaluation_interval)
    trainer.extend(generate_samples(avg_elbo_loss,
                                    os.path.join(args.base, args.out)),
                   trigger=evaluation_interval,
                   priority=extension.PRIORITY_WRITER)
    # trainer.extend(projection_to_latent_space(avg_elbo_loss, os.path.join(args.base, args.out), train),
    #                trigger=evaluation_interval,
    #                priority=extension.PRIORITY_WRITER)
    trainer.extend(reconstruction_img(avg_elbo_loss,
                                      os.path.join(args.base, args.out),
                                      train),
                   trigger=evaluation_interval,
                   priority=extension.PRIORITY_WRITER)
    # Print selected entries of the log to stdout
    #trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'gen/loss', 'elapsed_time']), trigger=display_interval)
    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar(update_interval=10))
    # Save two plot images to the result dir
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/kl_penalty', 'validation/kl_penalty'],
                                  'iteration',
                                  file_name='kl_loss.png',
                                  trigger=display_interval))
        trainer.extend(
            extensions.PlotReport(['main/reconstr', 'validation/reconstr'],
                                  'iteration',
                                  file_name='reconstr_loss.png',
                                  trigger=display_interval))
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/loss'],
                                  'iteration',
                                  file_name='loss.png',
                                  trigger=display_interval))

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    reset_seed(0)
    trainer.run()
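
reset_seed is called right before trainer.run() but not defined in the snippet. A common Chainer idiom it likely follows; the exact set of RNGs seeded is an assumption.

import random
import numpy as np
import chainer

def reset_seed(seed=0):
    # Fix every RNG the training touches so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    if chainer.backends.cuda.available:
        chainer.backends.cuda.cupy.random.seed(seed)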
Example #14
def main():
    parser = argparse.ArgumentParser(description='Train CycleGAN')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/training.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/training',
                        help='Directory to output the result')

    parser.add_argument('--model', '-m', default='', help='Load model data')

    parser.add_argument('--model2', '-m2', default='', help='Load model data')

    parser.add_argument('--resume',
                        '-res',
                        default='',
                        help='Resume the training from snapshot')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(config.batchsize))
    print('# iteration: {}'.format(config.iteration))
    print('Learning Rate: {}'.format(config.adam['alpha']))
    print('')

    #load the dataset
    print('----- Load dataset -----')
    train = CycleganDataset(args.root,
                            os.path.join(args.base,
                                         config.dataset['training_fn']),
                            config.patch['patchside'],
                            [config.patch['lrmin'], config.patch['lrmax']],
                            augmentation=True)
    train_iter = chainer.iterators.MultiprocessIterator(
        train, batch_size=config.batchsize)

    print('----- Set up model ------')
    gen = Generator_SR()
    gen2 = Generator_SR()
    disY = Discriminator()
    # chainer.serializers.load_npz(args.model, gen)
    # chainer.serializers.load_npz(args.model2, gen2)

    if args.gpu >= 0:
        chainer.backends.cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1GB
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
        gen2.to_gpu()
        disY.to_gpu()

    print('----- Make optimizer -----')

    def make_optimizer(model, alpha=0.00001, beta1=0.9, beta2=0.999):
        optimizer = chainer.optimizers.Adam(alpha=alpha,
                                            beta1=beta1,
                                            beta2=beta2)
        optimizer.setup(model)
        return optimizer

    gen_opt = make_optimizer(model=gen,
                             alpha=config.adam['alpha'],
                             beta1=config.adam['beta1'],
                             beta2=config.adam['beta2'])

    gen2_opt = make_optimizer(model=gen2,
                              alpha=config.adam['alpha'],
                              beta1=config.adam['beta1'],
                              beta2=config.adam['beta2'])

    disY_opt = make_optimizer(model=disY,
                              alpha=config.adam['alpha'],
                              beta1=config.adam['beta1'],
                              beta2=config.adam['beta2'])

    print('----- Make updater -----')
    updater = CinCGANUpdater(models=(gen, gen2, disY),
                             iterator=train_iter,
                             optimizer={
                                 'gen': gen_opt,
                                 'gen2': gen2_opt,
                                 'disY': disY_opt
                             },
                             device=args.gpu)

    print('----- Save configs -----')

    def create_result_dir(base_dir, output_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        result_dir = os.path.join(base_dir, output_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        if not os.path.exists('{}/init'.format(result_dir)):
            os.makedirs('{}/init'.format(result_dir))

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(os.path.join(base_dir, config_path), result_dir)

        copy_to_result_dir(os.path.join(base_dir, config.network['fn']),
                           result_dir)
        copy_to_result_dir(os.path.join(base_dir, config.updater['fn']),
                           result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, config.dataset['training_fn']), result_dir)

    create_result_dir(args.base, args.out, args.config_path, config)

    print('----- Make trainer -----')
    trainer = training.Trainer(updater, (config.iteration, 'iteration'),
                               out=os.path.join(args.base, args.out))

    # Set up logging
    snapshot_interval = (config.snapshot_interval, 'iteration')
    display_interval = (config.display_interval, 'iteration')
    evaluation_interval = (config.evaluation_interval, 'iteration')
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        gen, filename='gen_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        gen2, filename='gen2_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        disY, filename='disY_iter_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(reconstruct_hr_img(gen, gen2,
                                      os.path.join(args.base, args.out),
                                      train_iter, train),
                   trigger=evaluation_interval,
                   priority=extension.PRIORITY_WRITER)

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # Save two plot images to the result dir
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['gen/loss_gen1'],
                                  'iteration',
                                  file_name='gen_loss.png',
                                  trigger=display_interval))
        trainer.extend(
            extensions.PlotReport([
                'disY/loss_dis1_fake', 'disY/loss_dis1_real', 'disY/loss_dis1'
            ],
                                  'iteration',
                                  file_name='dis_loss.png',
                                  trigger=display_interval))
        trainer.extend(
            extensions.PlotReport(['gen/loss_gen', 'disY/loss_dis1'],
                                  'iteration',
                                  file_name='adv_loss.png',
                                  trigger=display_interval))

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    print('----- Run the training -----')
    reset_seed(0)
    trainer.run()
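
reset_seed is defined outside this example. A minimal sketch, assuming it seeds Python, NumPy and (when available) CuPy for reproducible runs:

import random

import chainer
import numpy as np

def reset_seed(seed=0):
    # Seed every RNG the training touches so repeated runs match.
    random.seed(seed)
    np.random.seed(seed)
    if chainer.backends.cuda.available:
        chainer.backends.cuda.cupy.random.seed(seed)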
Example #15
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base', '-B', default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path', type=str, default='configs/training.yml',
                        help='path to config file')
    parser.add_argument('--out', '-o', default='results/inference/test',
                        help='Directory to output the result')

    parser.add_argument('--model', '-m', default='',
                        help='Load model data (snapshot)')

    parser.add_argument('--model2', '-m2', default='',
                        help='Load model data (snapshot)')

    parser.add_argument('--root', '-R', default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    parser.add_argument('--save_flag', action='store_true',
                        help='Whether to save criteria images')
    parser.add_argument('--filename_arg', '-F', type=str, default='test_fn',
                        help='Which dataset list to use: val_fn or test_fn')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    LR_PATCH_SIDE, HR_PATCH_SIDE = config.patch['patchside'], config.patch['patchside']
    LR_PATCH_SIZE, HR_PATCH_SIZE = int(LR_PATCH_SIDE**3), int(HR_PATCH_SIDE**3)
    LR_MIN, LR_MAX = config.patch['lrmin'], config.patch['lrmax']
    print('HR PATCH SIZE: {}'.format(HR_PATCH_SIZE))
    print('LR PATCH SIZE: {}'.format(LR_PATCH_SIZE))
    print('')

    gen = Generator_SR()
    chainer.serializers.load_npz(args.model, gen)

    if args.gpu >= 0:
        chainer.backends.cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1GB
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()

    xp = gen.xp

    def create_result_dir(base_dir, output_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        result_dir = os.path.join(base_dir, output_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(
            os.path.join(base_dir, config_path), result_dir)

        copy_to_result_dir(
            os.path.join(base_dir, config.network['fn']), result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, config.updater['fn']), result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, config.dataset[args.filename_arg]), result_dir)

    create_result_dir(args.base, args.out, args.config_path, config)

    # Read data
    path_pairs = []
    with open(os.path.join(args.base, config.dataset[args.filename_arg])) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            path_pairs.append(line[:])

    print(' Inference start')
    for i in path_pairs:
        print('    Tri from: {}'.format(i[0]))
        print('    Org from: {}'.format(i[1]))
        #Read data and reshape
        sitkTri = sitk.ReadImage(os.path.join(args.root, i[0]))
        tri = (sitk.GetArrayFromImage(sitkTri).astype("float32") / 127.5) - 1.  # scale to [-1, 1]
        # Calculate the number of patches along each side
        ze, ye, xe = tri.shape
        xm = int(math.ceil(float(xe) / float(config.patch['interval'])))
        ym = int(math.ceil(float(ye) / float(config.patch['interval'])))
        zm = int(math.ceil(float(ze) / float(config.patch['interval'])))
        edge = 16  # must be an int: it is added to slice bounds below

        # Pad 8 voxels of context at the front of each axis and a full
        # patch side at the back so every grid position stays in bounds
        margin = ((8, config.patch['patchside']),
                  (8, config.patch['patchside']),
                  (8, config.patch['patchside']))
        tri = np.pad(tri, margin, 'edge')
        tri = chainer.Variable(
            xp.array(tri[np.newaxis, np.newaxis, :], dtype=xp.float32))

        inferred_map = np.zeros((ze + config.patch['patchside'],
                                 ye + config.patch['patchside'],
                                 xe + config.patch['patchside']))
        overlap_count = np.zeros(inferred_map.shape)

        # Patch loop
        print('     #Patches {}'.format(xm * ym * zm))

        for s in range(xm * ym * zm):
            # Decompose the linear patch index s into (x, y, z) grid offsets
            xi = int(s % xm) * config.patch['interval']
            yi = int((s % (ym * xm)) / xm) * config.patch['interval']
            zi = int(s / (ym * xm)) * config.patch['interval']

            # Extract patch from original image
            patch = tri[:, :, zi:zi + config.patch['patchside'] + edge,
                        yi:yi + config.patch['patchside'] + edge,
                        xi:xi + config.patch['patchside'] + edge]
            with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
                inferred_patch = gen(patch)  # 48*48*48
                inferred_patch = cropping(inferred_patch, 32)  # 32*32*32
            # Generate probability map
            inferred_patch = inferred_patch.data

            if args.gpu >= 0:
                inferred_patch = chainer.backends.cuda.to_cpu(inferred_patch)

            inferred_map[zi:zi + config.patch['patchside'],
                         yi:yi + config.patch['patchside'],
                         xi:xi + config.patch['patchside']] += np.squeeze(inferred_patch)
            overlap_count[zi:zi + config.patch['patchside'],
                          yi:yi + config.patch['patchside'],
                          xi:xi + config.patch['patchside']] += 1
        print('     Save image')
        inferred_map = inferred_map[:ze, :ye, :xe]
        overlap_count = overlap_count[:ze, :ye, :xe]
        inferred_map /= overlap_count  # average overlapping patches
        inferred_map = ((inferred_map + 1.) / 2 * 255)  # rescale from [-1, 1] to [0, 255]

        # Save prediction map
        inferenceImage = sitk.GetImageFromArray(inferred_map)
        inferenceImage.SetSpacing(sitkTri.GetSpacing())
        inferenceImage.SetOrigin(sitkTri.GetOrigin())

        result_dir = os.path.join(args.base, args.out)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        fn = os.path.splitext(os.path.basename(i[0]))[0]
        sitk.WriteImage(inferenceImage, '{}/{}.mhd'.format(result_dir, fn))

        print('     Start evaluations ')
        sitkGt = sitk.ReadImage(os.path.join(args.root, i[1]))
        gt = sitk.GetArrayFromImage(sitkGt).astype("float")
        start = time.time()
        mse_const = calc_mse(gt, inferred_map)
        psnr_const = calc_psnr(gt, inferred_map)
        ssim_const = calc_ssim(gt, inferred_map)
        zncc_const = calc_zncc(gt, inferred_map)
        diff_spe_const, diff_spe, diff_ang_const, diff_ang = calc_score_on_fft_domain(gt, inferred_map)
        df = pd.DataFrame({'MSE': [mse_const], 'PSNR': [psnr_const],
                           'SSIM': [ssim_const], 'ZNCC': [zncc_const],
                           'MAE-Power': [diff_spe_const],
                           'MAE-Angle': [diff_ang_const]})
        df.to_csv('{}/results.csv'.format(result_dir),
                  index=False, encoding='utf-8', mode='w')
        print('     Finish evaluations: {:.3f} [s]'.format(time.time() - start))
        if args.save_flag:
            def array2sitk(arr, spacing=[], origin=[]):
                # Guard: spacing and origin must match the array dimensionality
                if len(spacing) != arr.ndim or len(origin) != arr.ndim:
                    print("Dimension Error")
                    quit()
                sitkImg = sitk.GetImageFromArray(arr)
                sitkImg.SetSpacing(spacing)
                sitkImg.SetOrigin(origin)
                return sitkImg

            diffSpeImage = array2sitk(diff_spe, [1, 1, 1], [0, 0, 0])
            diffAngImage = array2sitk(diff_ang, [1, 1, 1], [0, 0, 0])
            sitk.WriteImage(diffSpeImage,
                            '{}/{}-diff_power_spe.mhd'.format(result_dir, fn))
            sitk.WriteImage(diffAngImage,
                            '{}/{}-diff_angle.mhd'.format(result_dir, fn))

    print(' Inference done')
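
The cropping helper called above is not part of this example. A minimal center-crop sketch consistent with that call, trimming a 5-D (N, C, Z, Y, X) variable down to size voxels per spatial axis; the equal-margin assumption is ours:

def cropping(x, size):
    # Center-crop the three spatial axes of an (N, C, Z, Y, X) variable.
    _, _, z, y, w = x.shape
    zs, ys, xs = (z - size) // 2, (y - size) // 2, (w - size) // 2
    return x[:, :, zs:zs + size, ys:ys + size, xs:xs + size]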
Example #16
0
def main():
    parser = argparse.ArgumentParser(description='Train 3D-Unet')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='Results_trM1_ValiM2',
                        help='Directory to output the result')

    parser.add_argument('--model', '-m', default='', help='Load model data')
    parser.add_argument('--resume',
                        '-res',
                        default='',
                        help='Resume the training from snapshot')

    parser.add_argument('--training_list',
                        default='configs/M1.txt',
                        help='Path to training image list file')
    parser.add_argument('--training_coordinate_list',
                        type=str,
                        default='configs/M1.csv')

    parser.add_argument('--validation_list',
                        default='configs/M2.txt',
                        help='Path to validation image list file')
    parser.add_argument('--validation_coordinate_list',
                        type=str,
                        default='configs/M2.csv')

    args = parser.parse_args()
    # https://stackoverflow.com/questions/21005822/what-does-os-path-abspathos-path-joinos-path-dirname-file-os-path-pardir
    config = yaml_utils.Config(
        yaml.load(open(os.path.join(os.path.dirname(__file__),
                                    args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(config.batchsize))
    print('# iteration: {}'.format(config.iteration))
    print('')

    # Load the datasets
    train = UnetDataset(args.root, args.training_list,
                        args.training_coordinate_list,
                        config.patch['patchside'])
    train_iter = chainer.iterators.SerialIterator(train,
                                                  batch_size=config.batchsize)

    validation = UnetDataset(args.root, args.validation_list,
                             args.validation_coordinate_list,
                             config.patch['patchside'])
    validation_iter = chainer.iterators.SerialIterator(
        validation, batch_size=config.batchsize, repeat=False, shuffle=False)

    # Set up a neural network to train
    print('Set up a neural network to train')
    unet = UNet3D(2)
    if args.model:
        chainer.serializers.load_npz(args.model, unet)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        unet.to_gpu()

    #Set up an optimizer
    def make_optimizer(model, alpha=0.0001, beta1=0.9, beta2=0.999):
        optimizer = chainer.optimizers.Adam(alpha=alpha,
                                            beta1=beta1,
                                            beta2=beta2)
        optimizer.setup(model)
        return optimizer

    opt_unet = make_optimizer(model=unet,
                              alpha=config.adam['alpha'],
                              beta1=config.adam['beta1'],
                              beta2=config.adam['beta2'])
    #Set up a trainer
    updater = Unet3DUpdater(models=(unet),
                            iterator=train_iter,
                            optimizer={'unet': opt_unet},
                            device=args.gpu)

    def create_result_dir(base, result_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(config_path, result_dir)
        copy_to_result_dir(os.path.join(base, config.unet['fn']), result_dir)

        copy_to_result_dir(os.path.join(base, config.updater['fn']),
                           result_dir)

    out = os.path.join(args.root, args.out)
    config_path = os.path.join(os.path.dirname(__file__), args.config_path)
    create_result_dir(args.root, out, config_path, config)

    trainer = training.Trainer(updater, (config.iteration, 'iteration'),
                               out=out)

    # Set up logging
    snapshot_interval = (config.snapshot_interval, 'iteration')
    display_interval = (config.display_interval, 'iteration')
    evaluation_interval = (config.evaluation_interval, 'iteration')
    trainer.extend(UNet3DEvaluator(validation_iter, unet, device=args.gpu),
                   trigger=evaluation_interval)
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(
        unet, filename=unet.__class__.__name__ + '_{.updater.iteration}.npz'),
                   trigger=snapshot_interval)

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport(trigger=display_interval))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar(update_interval=10))

    # Print selected entries of the log to stdout
    #report_keys = ['epoch', 'iteration', 'unet/loss','unet/dice','vali/unet/loss','vali/unet/dice']
    report_keys = ['iteration', 'unet/dice', 'vali/unet/dice']

    trainer.extend(extensions.PrintReport(report_keys),
                   trigger=display_interval)

    # Use linear shift
    ext_opt_unet = extensions.LinearShift(
        'alpha', (config.adam['alpha'], 0.),
        (config.iteration_decay_start, config.iteration), opt_unet)
    trainer.extend(ext_opt_unet)

    # Save two plot images to the result dir
    if extensions.PlotReport.available():
        #trainer.extend(extensions.PlotReport(['unet/loss','vali/unet/loss'], 'iteration', file_name='unet_loss.png',trigger=display_interval))
        trainer.extend(
            extensions.PlotReport(['unet/dice', 'vali/unet/dice'],
                                  'iteration',
                                  file_name='unet_dice.png',
                                  trigger=display_interval))

    if args.resume:
        # Resume from a snapshot
        print("Resume training with snapshot:{}".format(args.resume))
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    chainer.config.autotune = True
    print('Start training')
    trainer.run()
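
The 'unet/dice' and 'vali/unet/dice' entries reported above are computed inside Unet3DUpdater and UNet3DEvaluator, which are external to this example. For reference, a soft-Dice sketch on Chainer variables, under our own assumptions about the predicted and target volumes:

import chainer.functions as F

def soft_dice(pred, target, eps=1e-7):
    # Soft Dice coefficient over a batch of volumes in [0, 1].
    intersection = F.sum(pred * target)
    denom = F.sum(pred) + F.sum(target)
    return (2. * intersection + eps) / (denom + eps)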
Example #17
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/training.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/projection',
                        help='Directory to output the result')

    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Load model data (snapshot)')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    parser.add_argument('--num_patches',
                        type=int,
                        default=500,
                        help='number of patches that you want to extract')
    parser.add_argument('--filename',
                        type=str,
                        default='training_fn',
                        help='Which dataset list to use: training_fn, val_fn or test_fn')

    parser.add_argument('--dataset', help='path to dataset pickle')
    args = parser.parse_args()

    if args.dataset:
        with open(args.dataset, 'rb') as f:
            dataset = pickle.load(f)
        out = '{}/{}'.format(args.out, args.filename)
        output_dir = os.path.join(args.base, out)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        plot(dataset, output_dir)
        return

    print('----- Read configs ------')
    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))

    print('----- Load model ------')
    encoder = Encoder()
    chainer.serializers.load_npz(args.model, encoder)
    if args.gpu >= 0:
        chainer.backends.cuda.set_max_workspace_size(1024 * 1024 * 1024)  # 1GB
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        encoder.to_gpu()
    xp = encoder.xp

    print('----- Save configs ------')

    def create_result_dir(base_dir, output_dir, config_path, config):
        """https://github.com/pfnet-research/sngan_projection/blob/master/train.py"""
        result_dir = os.path.join(base_dir, output_dir)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        if not os.path.exists('{}/model'.format(result_dir)):
            os.makedirs('{}/model'.format(result_dir))

        def copy_to_result_dir(fn, result_dir):
            bfn = os.path.basename(fn)
            shutil.copy(fn, '{}/{}'.format(result_dir, bfn))

        copy_to_result_dir(os.path.join(base_dir, config_path), result_dir)

        copy_to_result_dir(os.path.join(base_dir, config.network['fn']),
                           result_dir)
        copy_to_result_dir(os.path.join(base_dir, config.dataset['val_fn']),
                           result_dir)
        copy_to_result_dir(
            os.path.join(base_dir, config.dataset['training_fn']), result_dir)
        copy_to_result_dir(os.path.join(base_dir, args.model),
                           '{}/model'.format(result_dir))

    out = '{}/{}'.format(args.out, args.filename)
    create_result_dir(args.base, out, args.config_path, config)
    output_dir = os.path.join(args.base, out)

    print('----- Load dataset -----')
    dataset = CvaeDataset(args.root,
                          os.path.join(args.base,
                                       config.dataset[args.filename]),
                          config.patch['patchside'],
                          [config.patch['lrmin'], config.patch['lrmax']],
                          augmentation=False)

    print('----- Start projection ------')
    index_list = []
    latent_list = []
    for n in range(args.num_patches):
        idx = np.random.randint(0, len(dataset))
        patch = dataset.get_example(idx)
        patch = chainer.Variable(
            xp.array(patch[np.newaxis, :], dtype=xp.float32))
        with chainer.using_config('train', False), chainer.using_config(
                'enable_backprop', False):
            mu, _ = encoder(patch)

        mu = mu.array
        if args.gpu >= 0:
            mu = chainer.backends.cuda.to_cpu(mu)
        mu = np.squeeze(mu, axis=0)

        latent_list.append(mu)
        index_list.append(idx)

    latent_list = np.asarray(latent_list)
    print('----- Start save -----')
    np.savetxt('{}/coordinate_idx.csv'.format(output_dir),
               np.asarray(index_list, dtype=int),
               delimiter=',')
    np.savetxt('{}/latent_data.csv'.format(output_dir),
               latent_list,
               delimiter=',')
    with open('{}/latent_data.pickle'.format(output_dir), 'wb') as f:
        pickle.dump(latent_list, f)

    print('----- plot results ------')
    plot(latent_list, output_dir)
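
The plot helper used here is defined elsewhere. A minimal sketch under our own assumption that it scatters the first two latent dimensions:

import matplotlib.pyplot as plt

def plot(latent, output_dir):
    # Scatter the first two latent dimensions and save to the result dir.
    plt.figure()
    plt.scatter(latent[:, 0], latent[:, 1], s=5)
    plt.xlabel('z[0]')
    plt.ylabel('z[1]')
    plt.savefig('{}/latent_scatter.png'.format(output_dir))
    plt.close()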
Example #18
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/base.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/inference',
                        help='Directory to output the result')

    parser.add_argument('--model',
                        '-m',
                        default='',
                        help='Load model data (snapshot)')

    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')
    args = parser.parse_args()
    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))
    print('GPU: {}'.format(args.gpu))
    print('')

    # The config patch side is defined on the HR grid; keep it, then
    # shrink the working value to the LR grid for patch extraction
    hr_patchside = config.patch['patchside']
    config.patch['patchside'] = int(config.patch['patchside'] /
                                    config.upsampling_rate)

    gen = ESPCN(r=config.upsampling_rate)
    chainer.serializers.load_npz(args.model, gen)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        gen.to_gpu()
    xp = gen.xp

    # Read test list
    path_pairs = []
    with open(os.path.join(args.base,
                           config.dataset['test_fn'])) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            path_pairs.append(line[:])

    case_list = []
    ssim_list = []
    psnr_list = []
    for i in path_pairs:
        print('   LR from: {}'.format(i[0]))
        print('   HR from: {}'.format(i[1]))
        sitkLR = sitk.ReadImage(os.path.join(args.root, i[0]))
        lr = sitk.GetArrayFromImage(sitkLR).astype("float32")

        # Calculate maximum of number of patch at each side
        ze, ye, xe = lr.shape
        xm = int(math.ceil((float(xe) / float(config.patch['patchside']))))
        ym = int(math.ceil((float(ye) / float(config.patch['patchside']))))
        zm = int(math.ceil((float(ze) / float(config.patch['patchside']))))

        margin = ((0, config.patch['patchside']),
                  (0, config.patch['patchside']),
                  (0, config.patch['patchside']))
        lr = np.pad(lr, margin, 'edge')
        lr = chainer.Variable(
            xp.array(lr[np.newaxis, np.newaxis, :], dtype=xp.float32))

        zh, yh, xh = ze * config.upsampling_rate, ye * config.upsampling_rate, xe * config.upsampling_rate
        hr_map = np.zeros(
            (zh + hr_patchside, yh + hr_patchside, xh + hr_patchside))

        # Patch loop
        for s in range(xm * ym * zm):
            xi = int(s % xm) * config.patch['patchside']
            yi = int((s % (ym * xm)) / xm) * config.patch['patchside']
            zi = int(s / (ym * xm)) * config.patch['patchside']

            # Extract patch from original image
            patch = lr[:, :, zi:zi + config.patch['patchside'],
                       yi:yi + config.patch['patchside'],
                       xi:xi + config.patch['patchside']]
            with chainer.using_config('train', False), chainer.using_config(
                    'enable_backprop', False):
                hr_patch = gen(patch)

            # Generate HR map
            hr_patch = hr_patch.data
            if args.gpu >= 0:
                hr_patch = chainer.cuda.to_cpu(hr_patch)
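            # Map the LR patch origin onto the HR grid via the upsampling rate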
            zi, yi, xi = zi * config.upsampling_rate, yi * config.upsampling_rate, xi * config.upsampling_rate
            hr_map[zi:zi + hr_patchside, yi:yi + hr_patchside,
                   xi:xi + hr_patchside] = hr_patch[0, :, :, :]

        print('Save image')
        hr_map = hr_map[:zh, :yh, :xh]

        # Save HR map
        inferenceHrImage = sitk.GetImageFromArray(hr_map)
        lr_spacing = sitkLR.GetSpacing()
        new_spacing = [i / config.upsampling_rate for i in lr_spacing]
        inferenceHrImage.SetSpacing(new_spacing)
        inferenceHrImage.SetOrigin(sitkLR.GetOrigin())
        result_dir = os.path.join(args.base, args.out)
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        fn = os.path.splitext(os.path.basename(i[0]))[0]
        sitk.WriteImage(inferenceHrImage, '{}/{}.mhd'.format(result_dir, fn))

        # Calc metric
        case_list.append(os.path.basename(i[0]))
        # PSNR
        sitkHR = sitk.ReadImage(os.path.join(args.root, i[1]))
        hr_gt = sitk.GetArrayFromImage(sitkHR).astype("float")

        psnr_const = psnr(hr_gt,
                          hr_map,
                          dynamic_range=np.amax(hr_gt) - np.amin(hr_gt))
        print('PSNR: {}'.format(psnr_const))
        psnr_list.append(psnr_const)
        # SSIM
        ssim_const = ssim(hr_gt,
                          hr_map,
                          dynamic_range=np.amax(hr_gt) - np.amin(hr_gt),
                          gaussian_weights=True,
                          use_sample_covariance=False)
        print('SSIM: {}'.format(ssim_const))
        ssim_list.append(ssim_const)

    df = pd.DataFrame({
        'Case': case_list,
        'PSNR': psnr_list,
        'SSIM': ssim_list
    })
    df.to_csv('{}/result.csv'.format(result_dir),
              index=False,
              encoding="utf-8",
              mode='w')
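
The psnr and ssim calls above are presumably scikit-image's old compare_psnr/compare_ssim (the dynamic_range keyword matches the pre-0.13 API). With a modern scikit-image, the equivalent calls would be:

from skimage.metrics import peak_signal_noise_ratio, structural_similarity

data_range = float(np.amax(hr_gt) - np.amin(hr_gt))
psnr_const = peak_signal_noise_ratio(hr_gt, hr_map, data_range=data_range)
ssim_const = structural_similarity(hr_gt, hr_map, data_range=data_range,
                                   gaussian_weights=True,
                                   use_sample_covariance=False)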
Example #19
0
def main():

    parser = argparse.ArgumentParser(description='Train pre')

    parser.add_argument('--base',
                        '-B',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='base directory path of program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='configs/cutting_position.yml',
                        help='path to config file')
    parser.add_argument('--out',
                        '-o',
                        default='results/histogram_base_KL_debug',
                        help='Directory to output the result')
    parser.add_argument('--root',
                        '-R',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Root directory path of input image')

    args = parser.parse_args()

    config = yaml_utils.Config(
        yaml.load(open(os.path.join(args.base, args.config_path)),
                  Loader=yaml.SafeLoader))

    #make output dir
    result_dir = os.path.join(args.base, args.out)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    mu1 = [-10, 0]
    mu2 = [10, 0]
    cov = [[2, 0], [0, 2]]
    total = 45000  #num of samples per distribution
    loop = 100  # num of repetitions per setting
    epsilons = [2.22e-6, 2.22e-7, 2.22e-8]  # smoothing terms for the KL divergence
    num_bins = np.arange(0.1, 5.0, 0.1, dtype=float)

    #Scatter_plot
    t1, v1 = np.random.multivariate_normal(mu1, cov, total).T
    t2, v2 = np.random.multivariate_normal(mu2, cov, total).T
    plt.scatter(t1, v1, c='cyan')
    plt.scatter(t2, v2, c='green')
    plt.ylabel('y')
    plt.xlabel('x')
    plt.show()

    print('----- Start -----')
    for e in range(len(epsilons)):
        ep = epsilons[e]
        kld = np.zeros(loop)
        x_bin = np.zeros(loop)
        y_bin = np.zeros(loop)
        for num_b in range(len(num_bins)):
            num_bin = num_bins[num_b]
            for i in range(loop):

                #make data
                x1, y1 = np.random.multivariate_normal(mu1, cov, total).T
                x2, y2 = np.random.multivariate_normal(mu2, cov, total).T

                # join
                x3 = np.hstack((x1, x2))
                y3 = np.hstack((y1, y2))

                #calc num of bins
                binx = int(
                    float(abs(int(np.min(x3) - 1)) + int(np.max(x3) + 1)) *
                    num_bin)
                biny = int(
                    float(abs(int(np.min(y3) - 1)) + int(np.max(y3) + 1)) *
                    num_bin)
                x_bin[i] = binx
                y_bin[i] = biny

                # make two histograms
                hist_lr, _, _ = np.histogram2d(
                    x1,
                    y1,
                    bins=[binx, biny],
                    range=[[int(np.min(x3) - 1.),
                            int(np.max(x3) + 1.)],
                           [int(np.min(y3) - 1.),
                            int(np.max(y3) + 1.)]])
                hist_hr, _, _ = np.histogram2d(
                    x2,
                    y2,
                    bins=[binx, biny],
                    range=[[int(np.min(x3) - 1.),
                            int(np.max(x3) + 1.)],
                           [int(np.min(y3) - 1.),
                            int(np.max(y3) + 1.)]])
                if np.sum(hist_hr) != total or np.sum(hist_lr) != total:
                    sys.exit('histogram counts do not match the sample size')

                #calc histogram base KL divergence
                kld[i] = KLD(hist_lr, hist_hr, ep)

            #save info
            df = pd.DataFrame({
                'ep': [ep],
                'num_b': [num_bin],
                'KLD_mean': [np.mean(kld)],
                'KLD_std': [np.std(kld)],
                'binx_mean': [np.mean(x_bin)],
                'binx_std': [np.std(x_bin)],
                'biny_mean': [np.mean(y_bin)],
                'biny_std': [np.std(y_bin)]
            })
            if e == 0 and num_b == 0:
                df.to_csv('{}/results.csv'.format(result_dir),
                          index=False,
                          encoding='utf-8',
                          mode='a',
                          header=True)
            df.to_csv('{}/results_another3.csv'.format(result_dir),
                      index=False,
                      encoding='utf-8',
                      mode='a',
                      header=None)

    print('----- Finish -----')
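
The KLD helper called above is external to this example. A minimal sketch of a histogram-based KL divergence with epsilon smoothing, consistent with the call KLD(hist_lr, hist_hr, ep) and under our own normalization assumption:

def KLD(hist_p, hist_q, ep):
    # Normalize both histograms to probabilities, smooth with ep to
    # avoid log(0), and accumulate p * log(p / q).
    p = hist_p / np.sum(hist_p) + ep
    q = hist_q / np.sum(hist_q) + ep
    return float(np.sum(p * np.log(p / q)))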