Example #1
    def test_merge_channels(self):
        for img in self.imgs:
            imglist = [img.clone(), img.clone()]
            merged_img = ants.merge_channels(imglist)

            self.assertEqual(merged_img.shape, img.shape)
            self.assertEqual(merged_img.components, len(imglist))
            nptest.assert_allclose(merged_img.numpy()[..., 0],
                                   imglist[0].numpy())

            imglist = [img.clone(), img.clone(), img.clone(), img.clone()]
            merged_img = ants.merge_channels(imglist)

            self.assertEqual(merged_img.shape, img.shape)
            self.assertEqual(merged_img.components, len(imglist))
            nptest.assert_allclose(merged_img.numpy()[..., 0],
                                   imglist[-1].numpy())

        for comparr in self.comparrs:
            imglist = [
                ants.from_numpy(comparr[..., i].copy())
                for i in range(comparr.shape[-1])
            ]
            merged_img = ants.merge_channels(imglist)
            for i in range(comparr.shape[-1]):
                nptest.assert_allclose(merged_img.numpy()[..., i],
                                       comparr[..., i])
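
merge_channels is the inverse of ants.split_channels; a minimal round-trip sketch (using the packaged r16 sample image so it runs standalone):

import ants
import numpy as np

img = ants.image_read(ants.get_ants_data("r16"))  # single-channel 2D sample
merged = ants.merge_channels([img, img])          # 2-component image
channels = ants.split_channels(merged)            # back to a list of scalar images
assert merged.components == 2
np.testing.assert_allclose(channels[0].numpy(), img.numpy())
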
Example #2

def apply_super_resolution_model_to_image(
    image,
    model,
    target_range=(-127.5, 127.5),
    batch_size=32,
    regression_order=None,
    verbose=False,
):
    """
    Apply a pretrained deep back projection model for super resolution.
    Helper function for applying a pretrained deep back projection model.
    Apply a patch-wise trained network to perform super-resolution. Can be applied
    to variable sized inputs. Warning: This function may be better used on CPU
    unless the GPU can accommodate the full image size. Warning 2: The global
    intensity range (min to max) of the output will match the input where the
    range is taken over all channels.

    Arguments
    ---------
    image : ANTs image
        input image.

    model : keras object or string
        pretrained keras model or filename.

    target_range : 2-element tuple
        a tuple or array defining the (min, max) of the input image
        (e.g., -127.5, 127.5).  Output images will be scaled back to original
        intensity. This range should match the mapping used in the training
        of the network.

    batch_size : integer
        Batch size used for the prediction call.

    regression_order : integer
        If specified, apply the function regression_match_image with
        poly_order=regression_order.

    verbose : boolean
        If True, show status messages.

    Returns
    -------
    Super-resolution image upscaled to resolution specified by the network.

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> image_sr = apply_super_resolution_model_to_image(image, get_pretrained_network("dbpn4x"))
    """
    tflite_flag = False
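    # Keras reports "channels_first" (NCHW) or "channels_last" (NHWC); track
    # where the component axis sits so the channel handling below matches.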
    channel_axis = 0
    if K.image_data_format() == "channels_last":
        channel_axis = -1

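    # Accept (max, min) as well as (min, max) by reversing a descending pair.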
    if target_range[0] > target_range[1]:
        target_range = target_range[::-1]

    start_time = time.time()
    if isinstance(model, str):
        if path.isfile(model):
            if verbose:
                print("Load model.")
            if path.splitext(model)[1] == '.tflite':
                tflite_flag = True
                interpreter = tf.lite.Interpreter(model)
                interpreter.allocate_tensors()
                input_details = interpreter.get_input_details()
                output_details = interpreter.get_output_details()
                shape_length = len(input_details[0]['shape'])
            else:
                model = load_model(model)
                shape_length = len(model.input_shape)

            if verbose:
                elapsed_time = time.time() - start_time
                print("  (elapsed time: ", elapsed_time, ")")
        else:
            raise ValueError("Model not found.")
    else:
        shape_length = len(model.input_shape)

    
    if shape_length < 4 or shape_length > 5:
        raise ValueError("Unexpected input shape.")
    if shape_length == 5 and image.dimension != 3:
        raise ValueError("Expecting 3D input for this model.")
    if shape_length == 4 and image.dimension != 2:
        raise ValueError("Expecting 2D input for this model.")

    if channel_axis == -1:
        channel_axis = shape_length - 1
    if tflite_flag:
        channel_size = input_details[0]['shape'][channel_axis]
    else:
        channel_size = model.input_shape[channel_axis]

    if channel_size != image.components:
        raise ValueError(
            f"Channel size of model ({channel_size}) does not match "
            f"ncomponents={image.components} of the input image."
        )

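    # Extract a single patch the size of the whole image: this just adds a
    # batch dimension of one rather than tiling the input.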
    image_patches = extract_image_patches(
        image,
        patch_size=image.shape,
        max_number_of_patches=1,
        stride_length=image.shape,
        return_as_array=True,
    )
    if image.components == 1:
        image_patches = np.expand_dims(image_patches, axis=-1)

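    # Linearly rescale intensities into target_range, the range the network
    # saw during training (see the target_range argument above).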
    image_patches = image_patches - image_patches.min()
    image_patches = (
        image_patches / image_patches.max() * (target_range[1] - target_range[0])
        + target_range[0]
    )

    if verbose:
        print("Prediction")

    start_time = time.time()

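    # The TFLite path feeds one float32 tensor matching the allocated input
    # shape; the Keras path batches internally via predict().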
    if tflite_flag:
        image_patches = image_patches.astype('float32')
        interpreter.set_tensor(input_details[0]['index'], image_patches)
        interpreter.invoke()
        out = interpreter.tensor(output_details[0]['index'])
        prediction = out()
    else:
        prediction = model.predict(image_patches, batch_size=batch_size)

    if verbose:
        elapsed_time = time.time() - start_time
        print("  (elapsed time: ", elapsed_time, ")")

    if verbose:
        print("Reconstruct intensities")

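    # Map predicted values back onto the input's global (min, max), as noted
    # in the docstring.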
    intensity_range = image.range()
    prediction = prediction - prediction.min()
    prediction = (
        prediction / prediction.max() * (intensity_range[1] - intensity_range[0])
        + intensity_range[0]
    )

    def slice_array_channel(input_array, slice_index, channel_axis=-1):
        # Extract one channel from the batched prediction; the channel axis
        # follows the batch axis for channels_first and is last otherwise.
        if channel_axis == 0:
            if shape_length == 4:
                return input_array[:, slice_index, :, :]
            else:
                return input_array[:, slice_index, :, :, :]
        else:
            if shape_length == 4:
                return input_array[:, :, :, slice_index]
            else:
                return input_array[:, :, :, :, slice_index]

    expansion_factor = np.asarray(prediction.shape) / np.asarray(image_patches.shape)
    if channel_axis == 0:
        # channels_first: rotate the leading entry to the end so the trim below
        # (dropping the first and last entries, both of which equal 1 here)
        # works for either data format.
        expansion_factor = np.roll(expansion_factor, -1)

    expansion_factor = expansion_factor[1 : (len(expansion_factor) - 1)]

    if verbose:
        print("ExpansionFactor:", str(expansion_factor))

    if image.components == 1:
        image_array = slice_array_channel(prediction, 0, channel_axis)
        prediction_image = ants.make_image(
            (np.asarray(image.shape) * np.asarray(expansion_factor)).astype(int),
            image_array,
        )
        if regression_order is not None:
            reference_image = ants.resample_image_to_target(image, prediction_image)
            prediction_image = regression_match_image(
                prediction_image, reference_image, poly_order=regression_order
            )
    else:
        image_component_list = list()
        for k in range(image.components):
            image_array = slice_array_channel(prediction, k, channel_axis)
            image_component_list.append(
                ants.make_image(
                    (np.asarray(image.shape) * np.asarray(expansion_factor)).astype(
                        int
                    ),
                    image_array,
                )
            )
        prediction_image = ants.merge_channels(image_component_list)

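    # Carry over origin/direction from the input, then divide the voxel spacing
    # by the expansion factor so the output spans the same physical extent.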
    prediction_image = ants.copy_image_info(image, prediction_image)
    ants.set_spacing(
        prediction_image,
        tuple(np.asarray(image.spacing) / np.asarray(expansion_factor)),
    )

    return prediction_image
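
A minimal usage sketch (assuming antspynet's get_pretrained_network, referenced in the docstring example, is available), with the optional regression-based intensity matching enabled:

import ants
from antspynet import get_pretrained_network

image = ants.image_read(ants.get_ants_data("r16"))
image_sr = apply_super_resolution_model_to_image(
    image,
    get_pretrained_network("dbpn4x"),  # pretrained 4x deep back-projection network
    regression_order=1,                # linearly match output intensities to the input
    verbose=True,
)
print(image.shape, "->", image_sr.shape)
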
Example #3

import ants
import antspynet
import imageio
import numpy as np

brain = ants.image_read("r16slice.nii.gz")
starry_night = imageio.imread("starry_night.jpg")
starry_night_red = np.squeeze(starry_night[:, :, 0])
starry_night_green = np.squeeze(starry_night[:, :, 1])
starry_night_blue = np.squeeze(starry_night[:, :, 2])

# Stack the three color channels into a single 3-component ANTs image;
# merge_channels sets .components itself, so no manual assignment is needed.
starry_night_ants = ants.merge_channels([
    ants.from_numpy(starry_night_red.astype(float)),
    ants.from_numpy(starry_night_green.astype(float)),
    ants.from_numpy(starry_night_blue.astype(float))
])

neural = antspynet.neural_style_transfer(
    brain,
    starry_night_ants,
    initial_combination_image=None,
    number_of_iterations=100,
    learning_rate=10.0,
    total_variation_weight=8.5e-5,
    content_weight=0.025,
    style_image_weights=1.0,
    content_layer_names=['block5_conv2'],
    style_layer_names="all",
    content_mask=None,
    style_masks=None)

Example #4

    # (excerpt from inside a registration loop; variables come from earlier in
    # the script, which is not shown)
    # nib.save(newhl, path + '/ED_norm_1201.nii')
    nib.save(shortNorm, path + addPa + 'ED_norm_1401.nii')
    print('Normalization done')
    # Checkerboard
    # hl_ch = np.zeros(ED_imgCH.shape)
    vl_ch = np.zeros(ED_imgCH.shape)
    for t in np.arange(0, ED_imgCH.shape[2]):
        # hl_ch[:, :, t] = cheBoard(ED_imgCH[:, :, t], newhlCH[:, :, t], 16, roi_hlArr[:, :, t])
        vl_ch[:, :, t] = cheBoard(ED_imgCH[:, :, t], newvlCH[:, :, t], 16, roi_vlArr[:, :, t])
    chest_vl = nib.Nifti1Image(vl_ch, newvl.affine, newvl.header)
    # chest_hl = nib.Nifti1Image(hl_ch, newhl.affine, newhl.header)
    print("Checkerboard filter applied")
    # nib.save(chest_hl, path + '/ED_chest_1201.nii')
    nib.save(chest_vl, path + addPa + 'ED_chest_1301.nii')
    final.append(regED_vl['warpedmovout'])
finalOut = ants.merge_channels(final)
finalroi = ants.merge_channels(outRoi)
finalroiArr = finalroi.view()
finalOutArr = finalOut.view()
out = np.zeros(ED_imgCL.shape)
for i in range(finalOut.components):
    # Accumulate the ROI-masked slices of component i across the stack.
    acc = np.zeros([ED_imgCH.shape[0], ED_imgCH.shape[1]])
    for j in range(finalOut.shape[2]):
        acc = acc + finalOutArr[i, :, :, j] * finalroiArr[i, :, :, j]
    out[:, :, i] = acc
out = ants.from_numpy(out, origin=finalOut.origin, spacing=finalOut.spacing, direction=finalOut.direction)
out = ants.to_nibabel(out)
nib.save(out, path + '/ED_final.nii')
print("Hold it right there")