Code Example #1
def process_slice(kspace, args):
    nkx, nky, nphases, ncoils = kspace.shape

    if 0 < args.crop_size < nkx:
        # crop along readout dimension
        images = fftc.ifftc(kspace, axis=0)
        images = center_crop(images, [args.crop_size, nky, nphases, ncoils])
        kspace = fftc.fftc(images, axis=0).astype(np.complex64)
        nkx = args.crop_size

    # simulate reduced FOV
    # kspace = reduce_fov(kspace, ...)

    # compute time-average for ESPIRiT calibration
    kspace_avg = time_average(kspace, axis=-2)

    # ESPIRiT - compute sensitivity maps
    cmd = f'ecalib -d 0 -S -m {args.nmaps} -c {args.crop_value} -r {args.calib_size}'
    maps = bart.bart(1, cmd, kspace_avg[:, :, None, :])
    maps = np.reshape(maps, (nkx, nky, 1, ncoils, args.nmaps))

    # Convert everything to tensors
    kspace_tensor = cplx.to_tensor(kspace).unsqueeze(0)
    maps_tensor = cplx.to_tensor(maps).unsqueeze(0)

    # Do coil combination using sensitivity maps (PyTorch)
    A = T.SenseModel(maps_tensor)
    im_tensor = A(kspace_tensor, adjoint=True)

    # Convert tensor back to numpy array
    target = cplx.to_numpy(im_tensor.squeeze(0))

    return kspace, maps, target
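
A minimal usage sketch (not part of the original script): the attribute names on args match what process_slice reads above, but the values shown are hypothetical.

from types import SimpleNamespace

# Hypothetical parameter values, for illustration only.
args = SimpleNamespace(crop_size=160,    # readout crop; values outside (0, nkx) disable it
                       nmaps=2,          # number of ESPIRiT maps (ecalib -m)
                       crop_value=0.95,  # eigenvalue crop threshold (ecalib -c)
                       calib_size=20)    # calibration region size (ecalib -r)

# kspace: complex64 array of shape (nkx, nky, nphases, ncoils)
kspace, maps, target = process_slice(kspace, args)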
Code Example #2
def process_slice(kspace, args, calib_method='jsense'):
    # get data dimensions
    nky, nkz, nechoes, ncoils = kspace.shape

    # ESPIRiT parameters
    nmaps = args.num_emaps
    calib_size = args.ncalib
    crop_value = args.crop_value

    if args.device == -1:
        device = sp.cpu_device
    else:
        device = sp.Device(args.device)

    # compute sensitivity maps (BART)
    #cmd = f'ecalib -d 0 -S -m {nmaps} -c {crop_value} -r {calib_size}'
    #maps = bart.bart(1, cmd, kspace[:,:,0,None,:])
    #maps = np.reshape(maps, (nky, nkz, 1, ncoils, nmaps))

    # compute sensitivity maps (SigPy expects the coil axis first)
    ksp = np.transpose(kspace[:, :, 0, :], [2, 1, 0])
    if calib_method == 'espirit':
        maps = app.EspiritCalib(ksp,
                                calib_width=calib_size,
                                crop=crop_value,
                                device=device,
                                show_pbar=False).run()
    elif calib_method == 'jsense':
        maps = app.JsenseRecon(ksp,
                               mps_ker_width=6,
                               ksp_calib_width=calib_size,
                               device=device,
                               show_pbar=False).run()
    else:
        raise ValueError(f'Unknown calibration method: {calib_method}')
    maps = np.reshape(np.transpose(maps, [2, 1, 0]),
                      (nky, nkz, 1, ncoils, nmaps))

    # Convert everything to tensors
    kspace_tensor = cplx.to_tensor(kspace).unsqueeze(0)
    maps_tensor = cplx.to_tensor(maps).unsqueeze(0)

    # Do coil combination using sensitivity maps (PyTorch)
    A = T.SenseModel(maps_tensor)
    im_tensor = A(kspace_tensor, adjoint=True)

    # Convert tensor back to numpy array
    image = cplx.to_numpy(im_tensor.squeeze(0))

    return image, maps
Code Example #3
def main(args):
    start = time.time()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # Figure out multi-GPU stuff
    if args.device > -1:
        logger.info(f'Using GPU device(s) {args.device}...')
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
        device = f'cuda:{args.device}'
    else:
        logger.info('Using CPU...')
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        device = 'cpu'

    logger.info('Loading DL Cine model...')
    model, model_args = load_model_and_args(args.checkpoint, args.device)

    # get filenames
    file_kspace = os.path.join(args.directory, args.kspace)
    file_maps = os.path.join(args.directory, args.maps)
    file_output = os.path.join(args.directory, args.output)

    logger.info('Reading input data...')
    kspace = cfl.read(file_kspace, order='F')
    maps = cfl.read(file_maps, order='F')

    # get data dimensions
    nx, ny, nslices, ncoils, _, nechoes, _, nphases = kspace.shape
    nmaps = maps.shape[4]

    # data summary
    logger.info('Detected data dimensions...')
    logger.info('  X (readout):   %d' % nx)
    logger.info('  Y (PE):        %d' % ny)
    logger.info('  Z (slices):    %d' % nslices)
    logger.info('  T (phases):    %d' % nphases)
    logger.info('  E (echoes):    %d' % nechoes)
    logger.info('  C (coils):     %d' % ncoils)
    logger.info('  M (ESPIRiT):   %d' % nmaps)

    # re-shape
    kspace = np.reshape(kspace, [nx, ny, nslices, ncoils, nechoes, nphases])
    kspace = np.transpose(kspace,
                          [2, 4, 0, 1, 5, 3])  # [sl, ec, x, y, t, coils]
    maps = np.reshape(maps, [nx, ny, nslices, ncoils, nmaps, 1, 1])
    maps = np.transpose(maps,
                        [2, 5, 0, 1, 6, 3, 4])  # [sl, 1, x, y, 1, coils, maps]

    # for now, just repeat maps across the echo dimension
    maps = np.tile(maps, [1, nechoes, 1, 1, 1, 1, 1])

    # flatten slice/echo dimension into a batch dimension
    kspace = np.reshape(kspace, [nslices * nechoes, nx, ny, nphases, ncoils])
    maps = np.reshape(maps, [nslices * nechoes, nx, ny, 1, ncoils, nmaps])

    logger.info('Pre-processing data...')
    kspace, maps, init = preprocess(kspace, maps, model_args)

    # Put all arrays on GPU.
    kspace = kspace.to(device)
    maps = maps.to(device)
    init = init.to(device)

    # Allocate memory for output array.
    images = torch.zeros((nslices * nechoes, nx, ny, nphases, nmaps, 2),
                         dtype=torch.float32)

    # Keep track of recon time
    recon_time = 0.0

    logger.info('Begin reconstruction.')
    with torch.no_grad():
        for i in range(nslices * nechoes):
            logger.info('  slice %d/%d' % (i + 1, nslices * nechoes))

            # Reconstruct on GPU.
            slice_start = time.time()
            output = model(kspace[i], maps[i], init[i])
            slice_end = time.time()

            # Move image data to CPU.
            images[i] = output.squeeze(0).to('cpu')

            # Add to recon timer.
            recon_time += (slice_end - slice_start)

    images = cplx.to_numpy(images)
    images = np.reshape(images, [nslices, nechoes, nx, ny, nphases, nmaps])
    images = np.transpose(images, [2, 3, 0, 5, 1, 4])  # [x, y, sl, maps, ec, t]
    cfl.write(file_output, images[:, :, :, None, :, :, :], order='F')

    # Print summary.
    logger.info('Reconstruction time: %0.2f' % (recon_time))
    logger.info("Total elapsed time: %0.2f" % (time.time() - start))
Code Example #4
os.environ["TOOLBOX_PATH"] = toolbox_path
sys.path.append(os.path.join(toolbox_path, 'python'))
import bart
import cfl

# blocking params
block_size = 16
block_stride = 16

# test dataset
filename = '/data/sandino/Cine/validate/Exam2200_Series5_Phases20.h5'

slice_idx = 0  # pick slice
with h5py.File(filename, 'r') as data:
    orig_images = data['target'][slice_idx]

# Convert numpy array to tensor
images = cplx.to_tensor(orig_images).unsqueeze(0)
_, nx, ny, nt, nmaps, _ = images.shape

# Initialize blocking operator
block_op = T.ArrayToBlocks(block_size, images.shape, overlapping=True)

# Round trip: decompose into blocks, then reconstruct via the adjoint
blocks = block_op(images)
images = block_op(blocks, adjoint=True)
images = images.squeeze(0)

# Write out images
images = cplx.to_numpy(images)
cfl.writecfl('block_input', orig_images)
cfl.writecfl('block_output', images)
cfl.writecfl('block_error', orig_images - images)
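
A quick numeric check one could add here (hypothetical; assumes numpy is imported as np): if the overlapping block operator is normalized so that the adjoint round-trip is near-identity, the relative error should be close to zero.

rel_err = np.linalg.norm(orig_images - images) / np.linalg.norm(orig_images)
print('relative block round-trip error: %.3e' % rel_err)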
Code Example #5
torch_start_time = time.time()
U_torch, S_torch, V_torch = cplx.svd2(X_torch, compute_uv=True)
torch_end_time = time.time()
print('PyTorch (CPU) time: %f' % (torch_end_time - torch_start_time))

X_torch = X_torch.cuda()
gpu_start_time = time.time()
U_gtorch, S_gtorch, V_gtorch = cplx.svd2(X_torch, compute_uv=True)
torch.cuda.synchronize()  # CUDA kernels are asynchronous; sync before stopping the timer
gpu_end_time = time.time()
print('PyTorch (GPU) time: %f' % (gpu_end_time - gpu_start_time))


# convert to numpy arrays
U_torch = cplx.to_numpy(U_torch)
S_torch = S_torch.numpy()  # real-valued
V_torch = cplx.to_numpy(V_torch)

if True:
    # compare against the numpy SVD results (U, S, Vh) computed earlier
    print('numpy')
    print(U.shape)
    print(S.shape)
    print(Vh.shape)
    print('\npytorch')
    print(U_torch.shape)
    print(S_torch.shape)
    print(V_torch.shape)

# re-compose
for i in range(batch_size):
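    # NOTE: the loop body is truncated in the source. A plausible (hypothetical)
    # completion recomposes each matrix as U @ diag(S) @ V^H and reports the
    # round-trip error, assuming svd2 returns V rather than V^H:
    X_np = cplx.to_numpy(X_torch.cpu())
    X_rec = (U_torch[i] * S_torch[i]) @ V_torch[i].conj().T
    print('batch %d recompose error: %e' % (i, np.linalg.norm(X_rec - X_np[i])))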
Code Example #6
def main():
    # ARGS
    input_data_path = '/mnt/dense/data_public/fastMRI/multicoil_val'
    output_data_path = '/mnt/raid3/sandino/fastMRI/validate_full'
    center_fraction = 0.04  # fraction of k-space lines used for ESPIRiT calibration
    num_emaps = 1
    dbwrite = False

    input_files = glob.glob(os.path.join(input_data_path, '*.h5'))

    for file in input_files:
        # Load HDF5 file
        hf = h5py.File(file, 'r')
        # existing keys: ['ismrmrd_header', 'kspace', 'reconstruction_rss']

        # load k-space and image data from HDF5 file
        kspace_orig = hf['kspace'][()]
        im_rss = hf['reconstruction_rss'][()]  # (33, 320, 320)

        # get data dimensions
        num_slices, num_coils, num_kx, num_ky = kspace_orig.shape
        xres, yres = im_rss.shape[1:3]  # matrix size
        num_low_freqs = int(round(center_fraction * yres))

        # allocate memory for new arrays
        im_shape = (xres, yres)
        kspace = np.zeros((num_slices, xres, yres, num_coils),
                          dtype=np.complex64)
        maps = np.zeros((num_slices, xres, yres, num_coils, num_emaps),
                        dtype=np.complex64)
        im_truth = np.zeros((num_slices, xres, yres, num_emaps),
                            dtype=np.complex64)

        for sl in range(num_slices):
            kspace_slice = np.transpose(kspace_orig[sl], axes=[1, 2, 0])
            kspace_slice = kspace_slice[:, :, None, :]

            # Data dimensions for BART:
            #  kspace - (kx, ky, 1, coils)
            #  maps - (kx, ky, 1, coils, emaps)
            # Data dimensions for PyTorch:
            #  kspace - (1, kx, ky, coils, real/imag)
            #  maps   - (1, kx, ky, coils, emaps, real/imag)

            # Pre-process k-space data (PyTorch)
            kspace_tensor = cplx.to_tensor(np.transpose(
                kspace_slice, axes=[2, 0, 1, 3]))  # (1, 640, 372, 15, 2)
            image_tensor = T.ifft2(kspace_tensor)
            print(image_tensor.size())
            image_tensor = cplx.center_crop(image_tensor, im_shape)
            kspace_tensor = T.fft2(image_tensor)
            kspace_slice = np.transpose(cplx.to_numpy(kspace_tensor),
                                        axes=[1, 2, 0, 3])

            # Compute sensitivity maps (BART)
            maps_slice = bart.bart(
                1, f'ecalib -d 0 -m {num_emaps} -c 0.1 -r {num_low_freqs}',
                kspace_slice)
            maps_slice = np.reshape(maps_slice,
                                    (xres, yres, 1, num_coils, num_emaps))
            maps_tensor = cplx.to_tensor(
                np.transpose(maps_slice, axes=[2, 0, 1, 3, 4]))

            # Do coil combination using sensitivity maps (PyTorch)
            A = T.SenseModel(maps_tensor)
            im_tensor = A(kspace_tensor, adjoint=True)

            # Convert image tensor to numpy array
            im_slice = cplx.to_numpy(im_tensor)

            # Re-shape and save everything
            kspace[sl] = np.reshape(kspace_slice, (xres, yres, num_coils))
            maps[sl] = np.reshape(maps_slice,
                                  (xres, yres, num_coils, num_emaps))
            im_truth[sl] = np.reshape(im_slice, (xres, yres, num_emaps))

        # write out new hdf5
        file_new = os.path.join(output_data_path, os.path.split(file)[-1])
        with h5py.File(file_new, 'w') as hf_new:
            # create datasets within HDF5
            hf_new.create_dataset('kspace', data=kspace)
            hf_new.create_dataset('maps', data=maps)
            hf_new.create_dataset('reconstruction_espirit', data=im_truth)
            hf_new.create_dataset('reconstruction_rss',
                                  data=im_rss)  # provided by fastMRI
            hf_new.create_dataset('ismrmrd_header', data=hf['ismrmrd_header'])

            # create attributes (metadata)
            for key in hf.attrs.keys():
                hf_new.attrs[key] = hf.attrs[key]

        hf.close()

        if dbwrite:
            with h5py.File(file_new, 'r') as hf_new:
                print('Keys:', list(hf_new.keys()))
                print('Attrs:', dict(hf_new.attrs))
                cfl.writecfl('/home/sandino/maps', hf_new['maps'][()])
                cfl.writecfl('/home/sandino/kspace', hf_new['kspace'][()])
                cfl.writecfl('/home/sandino/im_truth',
                             hf_new['reconstruction_rss'][()])
                cfl.writecfl('/home/sandino/im_recon',
                             hf_new['reconstruction_espirit'][()])
Code Example #7
# Extract spatial patches across images
patches = block_op(images)
num_patches = patches.shape[0]

# Reshape into batch of 2D matrices
patches = patches.permute(0, 1, 2, 4, 3, 5)
patches = patches.reshape((num_patches, ne * blk_size**2, nt, 2))

# Perform SVD to get left and right singular vectors
U, S, V = cplx.svd(patches, compute_uv=True)

# Truncate singular values and corresponding singular vectors
U = U[:, :, :nb, :]  # [N, Px*Py*E, B, 2]
S = S[:, :nb]  # [N, B]
V = V[:, :, :nb, :]  # [N, T, B, 2]

# Combine and reshape matrices
S_sqrt = S.reshape((num_patches, 1, 1, 1, 1, nb, 1)).sqrt()
L = U.reshape((num_patches, blk_size, blk_size, 1, ne, nb, 2)) * S_sqrt
R = V.reshape((num_patches, 1, 1, nt, 1, nb, 2)) * S_sqrt
blocks = torch.sum(cplx.mul(L, cplx.conj(R)), dim=-2)

images = block_op(blocks, adjoint=True)

# Write out images
images = cplx.to_numpy(images.squeeze(0))
cfl.writecfl('svdinit_input', orig_images)
cfl.writecfl('svdinit_output', images)
cfl.writecfl('svdinit_error', orig_images - images)
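
In effect, each block is replaced by its rank-nb approximation X ≈ L Rᴴ: the singular values are split symmetrically (as √S) between the spatial factor L and the temporal factor R, and the sum over the basis dimension when forming blocks carries out the L Rᴴ product.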
Code Example #8
def main(args):
    start = time.time()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # Figure out multi-GPU stuff
    if args.device > -1:
        logger.info(f'Using GPU device(s) {args.device}...')
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)
        device = f'cuda:{args.device}'
    else:
        logger.info('Using CPU...')
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        device = 'cpu'

    logger.info('Loading DL Cine model...')
    model, model_args = load_model_and_args(args.checkpoint, args.device)
    logger.debug(model_args)

    # get params for LR decomposition
    decom_params = dict(num_basis=model_args.num_basis,
                        block_size=model_args.block_size,
                        overlapping=model_args.overlapping)

    # get filenames
    file_kspace = os.path.join(args.directory, args.kspace)
    file_images = os.path.join(args.directory, args.output)

    logger.info('Reading input data...')
    kspace = cfl.read(file_kspace, order='F')
    if args.maps is not None:
        file_maps = os.path.join(args.directory, args.maps)
        maps = cfl.read(file_maps, order='F')
    else:
        logger.warning('Sensitivity maps not provided.')
        raise ValueError('ESPIRiT calibration not supported yet!')

    # get data dimensions
    nx = kspace.shape[0]
    ny = kspace.shape[1]
    nslices = kspace.shape[2]
    ncoils = kspace.shape[3]
    nechoes = kspace.shape[5]
    nphases = kspace.shape[7]
    nmaps = maps.shape[4]

    # data summary
    logger.info('Detected data dimensions...')
    logger.info('  X (readout):   %d' % nx)
    logger.info('  Y (PE):        %d' % ny)
    logger.info('  Z (slices):    %d' % nslices)
    logger.info('  T (phases):    %d' % nphases)
    logger.info('  E (echoes):    %d' % nechoes)
    logger.info('  C (coils):     %d' % ncoils)
    logger.info('  M (ESPIRiT):   %d' % nmaps)

    # re-shape
    kspace = np.reshape(kspace, [nx, ny, nslices, ncoils, nechoes, nphases])
    kspace = np.transpose(kspace,
                          [2, 4, 0, 1, 5, 3])  # [sl, ec, x, y, t, coils]
    maps = np.reshape(maps, [nx, ny, nslices, ncoils, nmaps, 1, 1])
    maps = np.transpose(maps,
                        [2, 5, 0, 1, 6, 3, 4])  # [sl, 1, x, y, 1, coils, maps]

    # squash slice and echo dimensions down into one
    kspace = np.reshape(kspace, [nslices * nechoes, nx, ny, nphases, ncoils])
    maps = np.tile(maps, [1, nechoes, 1, 1, 1, 1, 1])
    maps = np.reshape(maps, [nslices * nechoes, nx, ny, 1, ncoils, nmaps])

    logger.info('Pre-processing data...')
    kspace, maps, init = preprocess(kspace, maps, model_args)

    # Allocate memory for output array.
    images = torch.zeros((nslices * nechoes, nx, ny, nphases, nmaps, 2),
                         dtype=torch.float32)

    # Put all arrays on GPU.
    kspace = kspace.to(device)
    maps = maps.to(device)
    # init = init.to(device)  # initialization is faster on CPU!

    # Keep track of recon time
    recon_time = 0.0

    logger.info('Begin reconstruction.')
    with torch.no_grad():
        for i in range(nslices * nechoes):
            logger.info('  slice %d/%d' % (i + 1, nslices * nechoes))

            # Compute initialization on CPU.
            L, R = T.decompose_LR(init[i], **decom_params)
            initial_guess = (L.to(device), R.to(device))

            # Reconstruct on GPU.
            slice_start = time.time()
            output, _ = model(kspace[i], maps[i],
                              initial_guess=initial_guess)
            slice_end = time.time()

            # Move image data to CPU.
            images[i] = output.squeeze(0).to('cpu')

            # Add to recon timer.
            recon_time += (slice_end - slice_start)

    images = cplx.to_numpy(images)
    images = np.reshape(images, [nslices, nechoes, nx, ny, nphases, nmaps])
    images = np.transpose(images, [2, 3, 0, 5, 1, 4])  # [x, y, sl, maps, ec, t]
    cfl.write(file_images, images[:, :, :, None, :, :, :], order='F')

    # Print summary.
    logger.info('Reconstruction time: %0.2f' % (recon_time))
    logger.info("Total elapsed time: %0.2f" % (time.time()-start))
Code Example #9
# Convert numpy array to tensor
images = cplx.to_tensor(orig_images).unsqueeze(0)
_, nx, ny, nt, ne, _ = images.shape

# Initialize lists
glr_images = [None] * len(num_basis)
glr_error = [None] * len(num_basis)
llr_images = [None] * len(num_basis)
llr_error = [None] * len(num_basis)

for i in range(len(num_basis)):
    # Use globally low-rank model to compress images
    glr_images[i] = glr_compress(images, num_basis[i])
    glr_error[i] = images - glr_images[i]

    # Use locally low-rank model to compress images
    llr_images[i] = llr_compress(images, num_basis[i], blk_size, overlapping)
    llr_error[i] = images - llr_images[i]

glr_images = torch.cat(glr_images, dim=2).squeeze(0)
glr_error = torch.cat(glr_error, dim=2).squeeze(0)
llr_images = torch.cat(llr_images, dim=2).squeeze(0)
llr_error = torch.cat(llr_error, dim=2).squeeze(0)

# Write out images
cfl.writecfl('svd_glr_images', cplx.to_numpy(glr_images).swapaxes(0, 1))
cfl.writecfl('svd_glr_error', cplx.to_numpy(glr_error).swapaxes(0, 1))
cfl.writecfl('svd_llr_images', cplx.to_numpy(llr_images).swapaxes(0, 1))
cfl.writecfl('svd_llr_error', cplx.to_numpy(llr_error).swapaxes(0, 1))
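
The helpers glr_compress and llr_compress are defined elsewhere in the repo. As a rough sketch of what the global variant might look like (hypothetical, assuming cplx.svd batches over the leading dimension exactly as in Code Example #7):

def glr_compress(images, nb):
    # Hypothetical sketch, not the repo's implementation.
    _, nx, ny, nt, ne, _ = images.shape
    # Cast the whole series as one (space * emaps) x time matrix.
    X = images.permute(0, 1, 2, 4, 3, 5).reshape((1, nx * ny * ne, nt, 2))
    U, S, V = cplx.svd(X, compute_uv=True)
    # Keep the nb largest singular values and vectors.
    U, S, V = U[:, :, :nb, :], S[:, :nb], V[:, :, :nb, :]
    # Recompose X ~= U * diag(S) * V^H via broadcasting, as in Example #7.
    Us = U * S.reshape((1, 1, nb, 1))
    X = torch.sum(cplx.mul(Us.unsqueeze(2), cplx.conj(V).unsqueeze(1)), dim=3)
    return X.reshape((1, nx, ny, ne, nt, 2)).permute(0, 1, 2, 4, 3, 5)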