Code example #1
# NOTE: this snippet begins mid-way through an argument parser; the imports,
# the METHODS/PARAMS placeholders and the --base_file argument below are an
# assumed reconstruction based on how they are used further down
import argparse
import os

import numpy as np

from neuralnets.util.io import mkdir  # assumed import

# assumed: PARAMS maps each method name to a dict of parameter name ->
# np.arange exponent range (start, stop[, step]); METHODS lists the methods
METHODS = []
PARAMS = {}

parser = argparse.ArgumentParser()
parser.add_argument("--base_file",
                    "-b",
                    help="Path to the base script",  # hypothetical help text
                    type=str)
parser.add_argument("--coi",
                    "-c",
                    help="Class of interest",
                    type=int,
                    default=1)
parser.add_argument(
    "--target_dir",
    "-t",
    help="Path to the directory where the scripts will be saved",
    required=True,
    type=str)
args = parser.parse_args()

# create the output directory and load the base script
mkdir(args.target_dir)
with open(args.base_file, 'r') as f:
    lines = f.readlines()
    for method in METHODS:
        params = PARAMS[method]
        values = params.values()
        params = params.keys()
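        # build the Cartesian product of log-spaced parameter values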
        prms = np.meshgrid(
            *[10**(np.arange(*v).astype(float)) for v in values])
        for n in range(prms[0].size):
            param_values = [str(p.item(n)) for p in prms]
            lines_ = []
            for line in lines:
                line = line.replace('<PARAMS>', '"' + ','.join(params) + '"')
                line = line.replace('<VALUES>',
                                    '"' + ','.join(param_values) + '"')
                lines_.append(line)
            # write the parametrized script (assumed continuation: the original
            # snippet is truncated here and the file name is hypothetical)
            with open(os.path.join(args.target_dir,
                                   '%s_%d.py' % (method, n)), 'w') as f_out:
                f_out.writelines(lines_)
Code example #2
File: validation.py Project: MukaiGuy/neuralnets
def validate(net,
             data,
             labels,
             input_size,
             in_channels=1,
             classes_of_interest=(0, 1),
             batch_size=1,
             write_dir=None,
             val_file=None,
             track_progress=False,
             device=0,
             orientations=(0, ),
             normalization='unit'):
    """
    Validate a network on a dataset and its labels

    :param net: image-to-image segmentation network
    :param data: 3D array (Z, Y, X) representing the 3D image
    :param labels: 3D array (Z, Y, X) representing the 3D labels
    :param input_size: size of the inputs (either a 2- or 3-tuple) used during processing
    :param in_channels: number of consecutive slices that serve as input to the network (should be odd)
    :param classes_of_interest: indices of the labels of interest
    :param batch_size: batch size for processing
    :param write_dir: optionally, specify a directory to write the output
    :param val_file: optionally, specify a file to write the validation results
    :param track_progress: optionally, track progress with a progress bar
    :param device: GPU device where the computations should occur
    :param orientations: list of orientations along which to perform segmentation: 0-Z, 1-Y, 2-X (only for 2D segmentation)
    :param normalization: type of data normalization (unit, z or minmax)
    :return: validation results: per-class Jaccard scores and accuracy metrics (accuracy, balanced accuracy, precision, recall, F1)
    """

    print_frm('Validating the trained network...')

    # compute segmentation for each orientation and average results
    segmentation = np.zeros((net.out_channels, *data.shape))
    for orientation in orientations:
        segmentation += segment(data,
                                net,
                                input_size,
                                in_channels=in_channels,
                                batch_size=batch_size,
                                track_progress=track_progress,
                                device=device,
                                orientation=orientation,
                                normalization=normalization)
    segmentation = segmentation / len(orientations)

    # compute metrics
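    # voxels labeled 255 are treated as unlabeled and masked out of the metrics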
    w = labels != 255
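    # the Hausdorff distance is only computed when the volume is fully labeled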
    comp_hausdorff = np.sum(labels == 255) == 0
    js = np.asarray([
        jaccard(segmentation[i], (labels == c).astype('float'), w=w)
        for i, c in enumerate(classes_of_interest)
    ])
    ams = np.asarray([
        accuracy_metrics(segmentation[i], (labels == c).astype('float'), w=w)
        for i, c in enumerate(classes_of_interest)
    ])
    for i, c in enumerate(classes_of_interest):
        if comp_hausdorff:
            h = hausdorff_distance(segmentation[i], labels)[0]
        else:
            h = -1

        # report results
        print_frm('Validation performance for class %d: ' % c)
        print_frm('    - Accuracy: %f' % ams[i, 0])
        print_frm('    - Balanced accuracy: %f' % ams[i, 1])
        print_frm('    - Precision: %f' % ams[i, 2])
        print_frm('    - Recall: %f' % ams[i, 3])
        print_frm('    - F1 score: %f' % ams[i, 4])
        print_frm('    - IoU: %f' % js[i])
        print_frm('    - Hausdorff distance: %f' % h)

    # report results
    print_frm('Validation performance mean: ')
    print_frm('    - Accuracy: %f' % np.mean(ams[:, 0]))
    print_frm('    - Balanced accuracy: %f' % np.mean(ams[:, 1]))
    print_frm('    - Precision: %f' % np.mean(ams[:, 2]))
    print_frm('    - Recall: %f' % np.mean(ams[:, 3]))
    print_frm('    - F1 score: %f' % np.mean(ams[:, 4]))
    print_frm('    - mIoU: %f' % np.mean(js))

    # write the segmentation and validation results if requested
    if write_dir is not None:
        print_frm('Writing out the segmentation...')
        mkdir(write_dir)
        segmentation_volume = np.zeros(segmentation.shape[1:])
        for i, c in enumerate(classes_of_interest):
            segmentation_volume[segmentation[i] > 0.5] = c
        write_volume(segmentation_volume, write_dir, type='pngseq')
    if val_file is not None:
        np.save(val_file, np.concatenate((js[:, np.newaxis], ams), axis=1))
    return js, ams
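
A minimal usage sketch of validate, assuming the neuralnets library's UNet2D (the module path is assumed; the network, volume and output paths are hypothetical placeholders):

import numpy as np
from neuralnets.networks.unet import UNet2D  # assumed module path

net = UNet2D(in_channels=1, out_channels=2)       # hypothetical trained network
data = np.random.rand(32, 128, 128)               # (Z, Y, X) image volume
labels = np.random.randint(0, 2, (32, 128, 128))  # (Z, Y, X) label volume

js, ams = validate(net, data, labels, input_size=(128, 128),
                   classes_of_interest=(1, ), batch_size=4,
                   write_dir='seg_out', val_file='val_results.npy')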
Code example #3
    lr_monitor = pl.callbacks.LearningRateMonitor(logging_interval='step')
    print_frm('Starting joint pretraining')
    print_frm('Training with loss: %s' % params['loss'])
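    # keep the five best checkpoints, ranked by validation mIoU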
    checkpoint_callback = ModelCheckpoint(save_top_k=5,
                                          verbose=True,
                                          monitor='val/mIoU',
                                          mode='max')
    trainer = train(net, train_loader, val_loader,
                    [lr_monitor, checkpoint_callback], params)
    unet = net.get_unet()
    """
        Testing the network
    """
    print_frm('Testing network final performance')
    validate(unet, trainer, test_loader, params)
    """
        Save the final model
    """
    print_frm('Saving final model')
    shutil.copyfile(trainer.checkpoint_callback.best_model_path,
                    os.path.join(trainer.log_dir, 'best_model.ckpt'))
    """
        Clean up
    """
    if args.clean_up:
        print_frm('Cleaning up')
        os.system('rm -r ' + os.path.join(trainer.log_dir, 'checkpoints'))
        mkdir(os.path.join(trainer.log_dir, 'pretraining'))
        os.system('mv ' + trainer.log_dir + '/events.out.tfevents.* ' +
                  os.path.join(trainer.log_dir, 'pretraining'))
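
For completeness, the copied checkpoint can later be restored through PyTorch Lightning's load_from_checkpoint; a hedged sketch, assuming Net is the LightningModule subclass instantiated above (the class name is hypothetical):

net = Net.load_from_checkpoint(
    os.path.join(trainer.log_dir, 'best_model.ckpt'))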
Code example #4
# assumed imports and loop header for this truncated snippet; the original
# begins mid-loop, so the reconstruction below is a guess based on how the
# variables are used
import os

import numpy as np
from scipy.spatial import distance_matrix

from neuralnets.util.io import mkdir  # assumed import

for d in range(len(z)):
    # find the k samples in domain d whose embeddings are closest to z_ref
    dists = distance_matrix(z_ref, z[d])
    inds = np.argsort(dists[0])
    for kk in range(k):
        closest_samples[d, kk, ...] = samples[d, inds[kk], ...]
        closest_dists[d, kk, ...] = dists[0, inds[kk]]

    # # apply u-map
    # print_frm('U-Map dimensionality reduction')
    # reducer = umap.UMAP()
    # embedding = reducer.fit_transform(z)

    # # show results
    # print_frm('Visualization')
    # for g in np.unique(doms):
    #     i = np.where(doms == g)
    #     plt.scatter(embedding[i, 0], embedding[i, 1], label=g)
    # plt.legend()
    # plt.show()

# save results
mkdir(args.log_dir)
np.save(os.path.join(args.log_dir, 'x_ref.npy'), x_ref)
np.save(os.path.join(args.log_dir, 'z_ref.npy'), z_ref)
np.save(os.path.join(args.log_dir, 'z.npy'), z)
np.save(os.path.join(args.log_dir, 'doms.npy'), doms)
np.save(os.path.join(args.log_dir, 'samples.npy'), samples)
np.save(os.path.join(args.log_dir, 'reconstructions.npy'), reconstructions)
np.save(os.path.join(args.log_dir, 'closest_samples.npy'), closest_samples)
np.save(os.path.join(args.log_dir, 'closest_dists.npy'), closest_dists)
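
The commented-out block above sketches a UMAP visualization of the latent codes; a runnable version working from the saved arrays could look as follows, assuming z.npy holds an (n_samples, n_features) array with one domain label per row in doms.npy (the log directory is hypothetical):

import os

import matplotlib.pyplot as plt
import numpy as np
import umap

log_dir = 'logs'  # hypothetical
z = np.load(os.path.join(log_dir, 'z.npy'))
doms = np.load(os.path.join(log_dir, 'doms.npy'))

# 2D UMAP embedding of the latent codes, colored by domain
embedding = umap.UMAP().fit_transform(z)
for g in np.unique(doms):
    i = np.where(doms == g)
    plt.scatter(embedding[i, 0], embedding[i, 1], label=g)
plt.legend()
plt.show()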