Example no. 1
def label_cells():
    parser = argparse.ArgumentParser(description='Label cells.')
    parser.add_argument('input',
                        help="Input image filename",
                        metavar='<input>')
    parser.add_argument('output',
                        help="Output image filename",
                        metavar='<output>')
    parser.add_argument(
        '-b',
        '--block_size',
        help="Block size for local threshold (default:{})".format(
            _default_block_size),
        default=_default_block_size,
        type=int,
        metavar='<size>')
    parser.add_argument(
        '--offset',
        help="Offset for local threshold (default:{})".format(_default_offset),
        default=_default_offset,
        type=int,
        metavar='<n>')

    args = parser.parse_args()
    image = _load_image(args.input)
    wall = _extract_wall(image, args.block_size, args.offset)
    cells = _label_cells(wall)
    if os.path.splitext(args.output)[1] in ['.mhd', '.mha']:
        mhd.write(args.output, cells)
    else:
        rgba = color_cells(cells)
        Image.fromarray(rgba).save(args.output)
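The helpers _load_image, _extract_wall, and _label_cells are not part of the excerpt. A minimal sketch of what the latter two might look like, assuming scikit-image, an adaptive (local) threshold, and connected-component labelling; the threshold polarity and the dtypes are guesses:

import numpy as np
from skimage.filters import threshold_local
from skimage.measure import label


def _extract_wall(image, block_size, offset):
    # A pixel counts as "wall" when it is brighter than its local
    # neighbourhood threshold; block_size must be odd for threshold_local.
    return image > threshold_local(image, block_size, offset=offset)


def _label_cells(wall):
    # Cells are the connected non-wall regions between the walls.
    return label(~wall).astype(np.uint16)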
Example no. 2
def cv(k_index, train_index, test_index):
    outdir = os.path.join(base_outdir, 'k{}'.format(k_index))
    os.makedirs(outdir, exist_ok=True)

    for cid in tqdm.tqdm(ids_test):
        x, h = mhd.read_memmap(os.path.join(mhd_dir, cid + '.mha'))
        # the label volume is the lung mask used below (the original excerpt
        # read it into 'y' but then referenced an undefined 'lung')
        lung, _ = mhd.read(os.path.join(label_dir, cid + '.mha'))
        valid_zs = [(slab_thickness // 2 <= z < len(x) - slab_thickness // 2)
                    and np.any(lung[z] > 0) for z in range(len(x))]
        zmin = np.min(np.where(valid_zs))
        zmax = np.max(np.where(valid_zs))
        seeds = [SlabSeed(x, x, i) for i in range(zmin, zmax + 1)]
        p = model.predict_generator(SlabGenerator(seeds,
                                                  slab_thickness,
                                                  batch_size,
                                                  False,
                                                  transform=utils.transform,
                                                  transpose=False),
                                    max_queue_size=workers + 1,
                                    workers=workers,
                                    use_multiprocessing=workers > 1)
        p = np.squeeze(p)
        label = np.argmax(p, axis=-1).astype(np.uint8)
        label = np.pad(label, ((zmin, len(x) - zmax - 1), (0, 0), (0, 0)),
                       mode='constant',
                       constant_values=0)
        h['CompressedData'] = True
        mhd.write(os.path.join(outdir, cid + '.mha'), label, h)
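The np.pad call restores the full slice count: predictions exist only for z in [zmin, zmax], so zmin empty slices are prepended and len(x) - zmax - 1 appended. A self-contained check of that arithmetic:

import numpy as np

n_slices, zmin, zmax = 10, 3, 7
label = np.zeros((zmax - zmin + 1, 4, 4), dtype=np.uint8)  # predicted slab
label = np.pad(label, ((zmin, n_slices - zmax - 1), (0, 0), (0, 0)),
               mode='constant', constant_values=0)
assert label.shape[0] == n_slices  # zmin + (zmax - zmin + 1) + (n - zmax - 1)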
Example no. 3
def extract_wall():
    parser = argparse.ArgumentParser(description='Extract cell wall.')
    parser.add_argument('input',
                        help="Input image filename",
                        metavar='<input>')
    parser.add_argument('output',
                        help="Output image filename",
                        metavar='<output>')
    parser.add_argument(
        '-b',
        '--block_size',
        help="Block size for local threshold (default:{})".format(
            _default_block_size),
        default=_default_block_size,
        type=int,
        metavar='<size>')
    parser.add_argument(
        '--offset',
        help="Offset for local threshold (default:{})".format(_default_offset),
        default=_default_offset,
        type=int,
        metavar='<n>')

    args = parser.parse_args()
    image = _load_image(args.input)
    wall = _extract_wall(image, args.block_size, args.offset)
    if os.path.splitext(args.output)[1] in ['.mhd', '.mha']:
        mhd.write(args.output, wall.astype(np.uint8))
    else:
        cmap = np.array([[0, 0, 0, 0], [255, 0, 0, 255]]).astype(np.uint8)
        Image.fromarray(cmap[wall]).save(args.output)
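The cmap[wall] expression is NumPy fancy indexing used as a palette lookup: indexing the (2, 4) colour table with an integer label array of shape (H, W) yields an (H, W, 4) RGBA image. Note this requires an integer index array; a boolean wall mask would first have to be cast, e.g. cmap[wall.astype(np.uint8)]. A tiny demonstration:

import numpy as np

cmap = np.array([[0, 0, 0, 0],          # label 0 -> transparent
                 [255, 0, 0, 255]],     # label 1 -> opaque red
                dtype=np.uint8)
wall = np.array([[0, 1],
                 [1, 0]], dtype=np.uint8)
rgba = cmap[wall]                       # shape (2, 2, 4)
assert rgba.shape == (2, 2, 4)
assert (rgba[0, 1] == [255, 0, 0, 255]).all()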
Example no. 4
def modefilt():
    parser = argparse.ArgumentParser(description='Mode filter.')
    parser.add_argument('input', help="Input mhd filename", metavar='<input>')
    parser.add_argument('-o',
                        '--output',
                        help="Output mhd filename",
                        metavar='<filename>',
                        default='filtered.mha')
    parser.add_argument('--size',
                        help="Filter size",
                        metavar='<N>',
                        default=3,
                        type=int)

    args = parser.parse_args()
    label, h = mhd.read(args.input)
    filtered = _label_filters.modefilt3(label, args.size, 0)
    mhd.write(args.output, filtered, h)
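_label_filters.modefilt3 is presumably a compiled 3-D mode (majority) filter; its third argument is assumed here to be the padding value. A slow but equivalent sketch using scipy.ndimage.generic_filter:

import numpy as np
from scipy import ndimage


def modefilt3(label, size, pad_value=0):
    # Replace each voxel with the most frequent label in its size**3
    # neighbourhood; voxels outside the volume count as pad_value.
    def _mode(values):
        return np.bincount(values.astype(np.int64)).argmax()

    return ndimage.generic_filter(label, _mode, size=size,
                                  mode='constant', cval=pad_value)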
Example no. 5
def median_filter():
    parser = argparse.ArgumentParser(
        description='Median filter for mhd image.')
    parser.add_argument('input', help="Input filename", metavar='<input>')
    parser.add_argument('-o',
                        '--output',
                        help="Output filename. Defualt:%(default)s",
                        metavar='<output>',
                        default='filtered.mha')
    parser.add_argument('-s',
                        '--size',
                        help="Optional argument. Default:%(default)s",
                        metavar='<n>',
                        default=3,
                        type=int)

    args = parser.parse_args()
    import mhd
    # scipy.ndimage.filters is deprecated; import from scipy.ndimage
    from scipy.ndimage import median_filter
    image, h = mhd.read(args.input)
    filtered = median_filter(image, args.size)
    mhd.write(args.output, filtered, h)
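The mhd module used throughout these examples is a project-local MetaImage I/O helper. A rough stand-in built on SimpleITK, assuming a header dict that carries at least ElementSpacing in (x, y, z) order (extra keys such as CompressedData are ignored here):

import SimpleITK as sitk


def read(path):
    img = sitk.ReadImage(path)
    # GetArrayFromImage returns the voxels in (z, y, x) index order
    return sitk.GetArrayFromImage(img), {'ElementSpacing': img.GetSpacing()}


def write(path, array, header=None):
    img = sitk.GetImageFromArray(array)
    if header and 'ElementSpacing' in header:
        img.SetSpacing(tuple(float(s) for s in header['ElementSpacing']))
    sitk.WriteImage(img, path)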
Example no. 6
                print("epoch = {}, loss = {:.5f}".format(epochs + 1, loss.data))
                optimizer.step()
                out = output.data.cpu()
                running_loss += loss.item()
                if i % 2000 == 1999:
                    print('[%d,%5d] loss: %.3f' % (epochs + 1, i + 1, running_loss / 2000))
                    running_loss = 0.0

    result_dir = os.path.join(exp_dir, 'g{0}'.format(epochs))
    os.makedirs(result_dir, exist_ok=True)
    # save model weights; state_dict() must be passed to torch.save
    # (the original called it as a bare no-op and pickled the whole module)
    torch.save(net.state_dict(),
               os.path.join(result_dir, 'unet_model_weights.h5'))

    for i, test_IDs in enumerate(groups):
        # evaluate on the held-out group (the original excerpt built a list
        # of all dataset keys and looped over it outside the group loop)
        for test_ID in tqdm.tqdm(test_IDs, desc='Testing'):
            x_test = np.frombuffer(dataset[test_ID]['x'])
            # NOTE: the excerpt reuses 'out' from the last training batch;
            # a full evaluation would run the network on x_test instead
            predict_label = torch.argmax(out, dim=0).numpy()
            mhd.write(os.path.join(result_img_dir, test_ID), predict_label)
            datay = np.resize(dataset[test_ID]['y'], (2, 68, 68))
            JIs[test_ID], DCs[test_ID] = evaluate(predict_label, datay)
            refined = refine_labels(predict_label)
            mhd.write(os.path.join(result_img_dir, 'refined_' + test_ID),
                      refined)
            refined_JIs[test_ID], refined_DCs[test_ID] = evaluate(refined,
                                                                  datay)

np.savetxt(os.path.join(exp_dir, 'refined_JI.csv'), np.stack([refined_JIs[ID] for ID in all_IDs]),
           delimiter=",", fmt='%g')
np.savetxt(os.path.join(exp_dir, 'refined_Dice.csv'), np.stack([refined_DCs[ID] for ID in all_IDs]),
           delimiter=",", fmt='%g')
Example no. 7
        # the opening of this np.savetxt call was truncated in the excerpt;
        # 'loss_history.csv' is an assumed filename
        np.savetxt(os.path.join(result_dir, 'loss_history.csv'),
                   history.losses,
                   delimiter=",",
                   fmt='%g')

        # save model
        with open(os.path.join(result_dir, 'unet_model.json'), 'w') as f:
            f.write(model.to_json())
        model.save_weights(os.path.join(result_dir, 'unet_model_weights.h5'))

        for test_ID in test_IDs:
            x_test = dataset[test_ID]['x']
            predict_y = model.predict(x_test, batch_size=2, verbose=True)

            predict_label = np.argmax(predict_y, axis=3).astype(np.uint8)
            mhd.write(os.path.join(result_dir, test_ID + '.mhd'),
                      np.squeeze(predict_label.astype(np.uint8)),
                      header={'ElementSpacing': spacings[test_ID][::-1]})

            JIs[test_ID], DCs[test_ID], ASDs[test_ID] = evaluate(
                predict_label, np.squeeze(dataset[test_ID]['y']),
                spacings[test_ID])
            refined = postprocess.refine_label(predict_label)
            mhd.write(os.path.join(result_dir, 'refined_' + test_ID + '.mhd'),
                      np.squeeze(refined.astype(np.uint8)),
                      header={'ElementSpacing': spacings[test_ID][::-1]})
            refined_JIs[test_ID], refined_DCs[test_ID], refined_ASDs[test_ID] = \
                evaluate(refined, np.squeeze(dataset[test_ID]['y']),
                         spacings[test_ID])

    np.savetxt(os.path.join(exp_dir, 'JI.csv'),
               np.stack([JIs[ID] for ID in all_IDs]),