Example #1
def ngradient_test():

    # NOTE: test function not strictly scalar-valued
    exponential = lambda x: np.exp(x)
    g1 = reg.ngradient(exponential, np.ones((1, )))
    assert abs(
        g1 - exponential(1)
    ) < 1e-5, "Numerical gradient is incorrectly implemented (exponential test)"

    #------------------------------------------------------------------#
    # TODO: Implement a few more test cases of ngradient
    testfunction2 = lambda x: 3 * x**2
    g2 = reg.ngradient(testfunction2, np.array([1]))

    assert abs(g2 - 6) < 1e-5, "Numerical gradient is incorrectly implemented"

    testfunction3 = lambda x: np.sqrt(x)
    g3 = reg.ngradient(testfunction3, np.array([1]))
    assert abs(g3 -
               0.5) < 1e-5, "Numerical gradient is incorrectly implemented"

    # gradient of f(x) = x1^2 + x2^2 at (2, 5) should be approximately (4, 10);
    # ngradient passes the whole parameter vector, so the test function takes a single array
    testfunction4 = lambda x: x[0]**2 + x[1]**2
    g4 = reg.ngradient(testfunction4, np.array([2., 5.]))
    print(g4)
    #------------------------------------------------------------------#

    print('Test successful!')
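These tests assume that reg.ngradient computes a numerical gradient by finite differences. The registration module itself is not shown here, so the following is only a minimal central-difference sketch of what such a function could look like; the name ngradient_sketch, the default step size and the differencing scheme are assumptions, not the course implementation.

import numpy as np

def ngradient_sketch(fun, x, h=1e-3):
    # hypothetical stand-in for reg.ngradient: central differences per parameter
    # fun: callable taking a 1-D numpy array, x: 1-D numpy array of parameters
    g = np.zeros_like(x, dtype=float)
    for i in range(x.size):
        xp = x.astype(float)
        xm = x.astype(float)
        xp[i] += h / 2
        xm[i] -= h / 2
        g[i] = (fun(xp) - fun(xm)) / h
    return g

# e.g. ngradient_sketch(lambda x: 3 * x[0]**2, np.array([1.0])) gives approximately [6.]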
Example #2
def ngradient_test():

    # NOTE: test function not strictly scalar-valued
    exponential = lambda x: np.exp(x)
    g1 = reg.ngradient(exponential, np.ones((1, )))
    assert abs(
        g1 - exponential(1)
    ) < 1e-5, "Numerical gradient is incorrectly implemented (exponential test)"

    # a second test case: the gradient of f(x) = x1^2 + x2^2 at (1, 2) is approximately (2, 4)
    test_fun = lambda x: x[0]**2 + x[1]**2
    g2 = reg.ngradient(test_fun, np.array([1., 2.]))
    assert np.allclose(g2, [2., 4.], atol=1e-5), "Numerical gradient is incorrectly implemented"

    print('Test successful!')
Example #3
def ngradient_test():

    # NOTE: test function not strictly scalar-valued
    exponential = lambda x: np.exp(x)
    g1 = reg.ngradient(exponential, np.ones((1, )))
    assert abs(
        g1 - exponential(1)
    ) < 1e-5, "Numerical gradient is incorrectly implemented (exponential test)"

    #------------------------------------------------------------------#
    # Implement a few more test cases of ngradient
    deel = lambda x: x / 2
    g2 = reg.ngradient(deel, np.array([1, 2, 3, 4, 5]))
    print(g1, g2)
    #------------------------------------------------------------------#

    print('Test successful!')
Example #4
def logistic_regression():
    
    # dataset preparation
    num_training_samples = 300
    num_validation_samples = 100
    
    # here we reuse the function from the segmentation practicals
    m1=[2,3]
    m2=[-0,-4]
    s1=[[8,7],[7,8]]
    s2=[[8,6],[6,8]]

    [trainingX, trainingY] = util.generate_gaussian_data(num_training_samples, m1, m2, s1, s2)
    r,c = trainingX.shape
    print('Training sample shape: {}'.format(trainingX.shape))

    # we need a validation set to monitor for overfitting
    [validationX, validationY] = util.generate_gaussian_data(num_validation_samples, m1, m2, s1, s2)
    r_val,c_val = validationX.shape
    print('Validation sample shape: {}'.format(validationX.shape))
    
    validationXones = util.addones(validationX)

    # train a logistic regression model:
    # the learning rate for the gradient descent method
    # (the same as in intensity-based registration)
    mu = 0.001

    # we are actually using stochastic gradient descent
    batch_size = 30

    # initialize the parameters of the model with small random values,
    # we need one parameter for each feature and a bias
    Theta = 0.02*np.random.rand(c+1, 1)

    # number of gradient descent iterations
    num_iterations = 300

    # variables to keep the loss and gradient at every iteration
    # (needed for visualization)
    iters = np.arange(num_iterations)
    loss = np.full(iters.shape, np.nan)
    validation_loss = np.full(iters.shape, np.nan)

    # Create base figure
    fig = plt.figure(figsize=(15,8))
    ax1 = fig.add_subplot(121)
    im1, Xh_ones, num_range_points = util.plot_lr(trainingX, trainingY, Theta, ax1)
    util.scatter_data(trainingX, trainingY, ax=ax1);
    ax1.grid()
    ax1.set_xlabel('x_1')
    ax1.set_ylabel('x_2')
    ax1.legend()
    ax1.set_title('Training set')
    text_str1 = '{:.4f};  {:.4f};  {:.4f}'.format(0, 0, 0)
    txt1 = ax1.text(0.3, 0.95, text_str1, bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}, transform=ax1.transAxes)

    ax2 = fig.add_subplot(122)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Loss (average per sample)')
    ax2.set_title('mu = '+str(mu))
    h1, = ax2.plot(iters, loss, linewidth=2, label='Training loss')
    h2, = ax2.plot(iters, validation_loss, linewidth=2, label='Validation loss')
    ax2.set_ylim(0, 0.7)
    ax2.set_xlim(0, num_iterations)
    ax2.grid()
    ax2.legend()

    text_str2 = 'iter.: {}, loss: {:.3f}, val. loss: {:.3f}'.format(0, 0, 0)
    txt2 = ax2.text(0.3, 0.95, text_str2, bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}, transform=ax2.transAxes)

    # iterate
    for k in np.arange(num_iterations):
        
        # pick a batch at random
        idx = np.random.randint(r, size=batch_size)

        # the loss function for this particular batch
        loss_fun = lambda Theta: cad.lr_nll(util.addones(trainingX[idx,:]), trainingY[idx], Theta)

        # gradient descent:
        # here we reuse the code for numerical computation of the gradient
        # of a function
        Theta = Theta - mu*reg.ngradient(loss_fun, Theta)

        # compute the loss for the current model parameters for the
        # training and validation sets
        # note that the loss is divided with the number of samples so
        # it is comparable for different number of samples
        loss[k] = loss_fun(Theta)/batch_size
        validation_loss[k] = cad.lr_nll(validationXones, validationY, Theta)/r_val

        # update the visualization
        ph = cad.sigmoid(Xh_ones.dot(Theta)) > 0.5
        decision_map = ph.reshape(num_range_points, num_range_points)
        decision_map_trns = np.flipud(decision_map)
        im1.set_data(decision_map_trns)
        text_str1 = '{:.4f};  {:.4f};  {:.4f}'.format(Theta[0,0], Theta[1,0], Theta[2,0])
        txt1.set_text(text_str1)
        h1.set_ydata(loss)
        h2.set_ydata(validation_loss)
        text_str2 = 'iter.={}, loss={:.3f}, val. loss={:.3f} '.format(k, loss[k], validation_loss[k])
        txt2.set_text(text_str2)


        display(fig)
        clear_output(wait = True)
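The loop above divides the value returned by cad.lr_nll by the number of samples, so lr_nll is assumed to return the summed (unnormalised) negative log-likelihood of a logistic regression model. A minimal sketch under that assumption follows; sigmoid_sketch and lr_nll_sketch are illustrative names, not the cad module's API.

import numpy as np

def sigmoid_sketch(a):
    # logistic function, assumed to behave like cad.sigmoid
    return 1.0 / (1.0 + np.exp(-a))

def lr_nll_sketch(X, y, Theta):
    # hypothetical stand-in for cad.lr_nll: summed negative log-likelihood
    # X: (N, d+1) design matrix including a column of ones,
    # y: (N, 1) labels in {0, 1}, Theta: (d+1, 1) parameter vector
    p = sigmoid_sketch(X.dot(Theta))
    eps = 1e-12  # guard against log(0)
    return -np.sum(y * np.log(p + eps) + (1 - y) * np.log(1 - p + eps))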
Example #5
def intensity_based_registration_demo():

    # read the fixed and moving images
    # change these in order to read different images
    I = plt.imread('../data/image_data/1_1_t1.tif')
    Im = plt.imread('../data/image_data/1_1_t1_d.tif')

    # initial values for the parameters
    # we start with the identity transformation
    # most likely you will not have to change these
    x = np.array([0., 0., 0.])

    # NOTE: for affine registration you have to initialize
    # more parameters and the scaling parameters should be
    # initialized to 1 instead of 0

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation

    # rigid_corr() returns more than just the correlation value (it also
    # returns the transformed image and the transformation), so only the
    # first element of its output is used as the objective for ngradient.
    fun = lambda x: (reg.rigid_corr(I, Im, x))[0]
    fun_full = lambda x: reg.rigid_corr(I, Im, x)

    # the learning rate
    mu = 0.001

    # number of iterations
    num_iter = 200

    iterations = np.arange(1, num_iter+1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14,6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    txt = ax1.text(0.3, 0.95,
        np.array2string(x, precision=5, floatmode='fixed'),
        bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10},
        transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    # perform 'num_iter' gradient ascent updates
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x += g*mu

        # for visualization of the result
        S, Im_t, _ = fun_full(x)

        clear_output(wait = True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        display(fig)
Example #6
def intensity_based_registration(I_path, Im_path, r_a_switch=0, corr_mi_switch=0):

    # r_a_switch: 0 --> rigid (default)
    #             1 --> affine
    assert r_a_switch == 0 or r_a_switch == 1, "Error: input parameter r_a_switch must be either 0 or 1."

    # corr_mi_switch: 0 --> correlation (default)
    #                 1 --> mutual information
    assert corr_mi_switch == 0 or corr_mi_switch == 1, "Error: input parameter corr_mi_switch must be either 0 or 1."

    # read the fixed and moving images
    # change these in order to read different images
    I = plt.imread(I_path)
    Im = plt.imread(Im_path)

    # initial values for the parameters
    if r_a_switch == 0:
        x = np.array([0., 0., 0.]) # rotation,transx,transy
    elif r_a_switch == 1:
        x = np.array([0., 1., 1.,0.,0.,0.,0.]) # rotation,scalex,scaley,shearx,sheary,transx,transy
    else:
        print("ERROR.. r_a_switch must be either 0 or 1")

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation

    if r_a_switch == 0:
        assert corr_mi_switch == 0, "Error, only correlation similarity possible with rigid registration"
        sim_fun = reg.rigid_corr
    elif r_a_switch == 1:
        if corr_mi_switch == 0:
            sim_fun = reg.affine_corr
        else:
            sim_fun = reg.affine_mi
    else:
        print("ERROR.. r_a_switch must be either 0 or 1")

    fun = lambda x: (sim_fun(I, Im, x))[0]
    fun_full = lambda x: sim_fun(I, Im, x)

    if corr_mi_switch == 0:
        # the initial learning rate
        mu = 0.005
        # number of iterations
        num_iter = 50
    else:
        # the initial learning rate
        mu = 0.003
        # number of iterations
        num_iter = 30

    # exponentially decaying learning rate: mu at the first iteration,
    # decaying to roughly mu/150 (a factor exp(-5)) at the final iteration
    fun_mu = lambda i: mu*np.exp(-5*i/num_iter)

    iterations = np.arange(1, num_iter+1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14,6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    txt = ax1.text(0.3, 0.95,
        np.array2string(x, precision=5, floatmode='fixed'),
        bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10},
        transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1.1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    # perform 'num_iter' gradient ascent updates
    i = 0
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x += g*fun_mu(i)

        # for visualization of the result
        S, Im_t, _ = fun_full(x)

        clear_output(wait = True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        display(fig)

        i = i+1
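A usage sketch for the switch-based interface above, reusing image paths from the earlier demo; the exact paths depend on where the notebook is run from.

# rigid registration with correlation (the defaults)
intensity_based_registration('../data/image_data/1_1_t1.tif',
                             '../data/image_data/1_1_t1_d.tif')

# affine registration with mutual information
intensity_based_registration('../data/image_data/1_1_t1.tif',
                             '../data/image_data/1_1_t2.tif',
                             r_a_switch=1, corr_mi_switch=1)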
Example #7
def intensity_based_registration_for_loop():

    # Define the images, learning rates and number of iterations that you want to test.
    # Adjust the image paths (read below) and the savenames used for the saved figures to your needs.
    imagepaths = ['2_2', '2_3', '3_1', '3_2', '3_3']
    mu_s = [0.00018, 0.0001, 0.0002, 0.00013, 0.00015]
    iterationlist = [350, 350, 350, 350, 350]
    savenames = ['2_2', '2_3', '3_1', '3_2', '3_3']

    for i in range(len(imagepaths)):

        # read the fixed and moving images
        # change these in order to read different images
        I = plt.imread('./data/image_data/{}_t1.tif'.format(imagepaths[i]))
        Im = plt.imread('./data/image_data/{}_t1_d.tif'.format(imagepaths[i]))

        # initial values for the parameters
        # we start with the identity transformation
        # most likely you will not have to change these
        similarity_measure = reg.affine_mi

        if similarity_measure == reg.rigid_corr:
            x = np.array([0., 0., 0.])

        else:
            x = np.array([0., 1., 1., 0., 0., 0., 0.])

        # NOTE: for affine registration you have to initialize
        # more parameters and the scaling parameters should be
        # initialized to 1 instead of 0

        # the similarity function
        # this line of code in essence creates a version of rigid_corr()
        # in which the first two input parameters (fixed and moving image)
        # are fixed and the only remaining parameter is the vector x with the
        # parameters of the transformation
        fun = lambda x: similarity_measure(I, Im, x)

        # the learning rate
        mu = mu_s[i]

        # number of iterations
        num_iter = iterationlist[i]

        iterations = np.arange(1, num_iter + 1)
        similarity = np.full((num_iter, 1), np.nan)

        fig = plt.figure(figsize=(14, 6))

        # fixed and moving image, and parameters
        ax1 = fig.add_subplot(121)

        # fixed image
        im1 = ax1.imshow(I)
        # moving image
        im2 = ax1.imshow(I, alpha=0.7)
        # parameters
        if similarity_measure == reg.rigid_corr:
            txt = ax1.text(0.3,
                           0.95,
                           np.array2string(x, precision=5, floatmode='fixed'),
                           bbox={
                               'facecolor': 'white',
                               'alpha': 1,
                               'pad': 10
                           },
                           transform=ax1.transAxes)
        else:
            txt = ax1.text(-0.02,
                           1.02,
                           np.array2string(x, precision=5, floatmode='fixed'),
                           bbox={
                               'facecolor': 'white',
                               'alpha': 1,
                               'pad': 10
                           },
                           transform=ax1.transAxes)
        # 'learning' curve
        ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

        learning_curve, = ax2.plot(iterations, similarity, lw=2)
        ax2.set_title("mu =" + str(mu))
        ax2.set_xlabel('Iteration')
        ax2.set_ylabel('Similarity')
        ax2.grid()

        # perform 'num_iter' gradient ascent updates
        for k in np.arange(num_iter):

            # gradient ascent
            g = reg.ngradient(fun, x)
            x += g * mu

            # for visualization of the result
            S, Im_t, _ = fun(x)

            clear_output(wait=True)

            # update moving image and parameters
            im2.set_data(Im_t)
            txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

            # update 'learning' curve
            similarity[k] = S
            learning_curve.set_ydata(similarity)

            # display the figure
            display(fig)

        # save the figure
        # Currently optimized for speed. If you want to save an image every iteration, indent these lines into the loop above.
        savename = './data/image_results/{}_t1 + {}_t1_d affine_mi mu = {} integer = {}.png'.format(
            savenames[i], savenames[i], mu, num_iter)
        fig.savefig(savename)
Example #8
def intensity_based_registration_demo():

    # read the fixed and moving images
    # change these in order to read different images
    I = plt.imread('./data/image_data/3_2_t1.tif')
    Im = plt.imread('./data/image_data/3_2_t1_d.tif')

    # initial values for the parameters
    # we start with the identity transformation
    # most likely you will not have to change these
    similarity_measure = reg.rigid_corr

    if similarity_measure == reg.rigid_corr:
        x = np.array([0., 0., 0.])

    else:
        x = np.array([0., 1., 1., 0., 0., 0., 0.])

    # NOTE: for affine registration you have to initialize
    # more parameters and the scaling parameters should be
    # initialized to 1 instead of 0

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation
    fun = lambda x: similarity_measure(I, Im, x)

    # the learning rate
    mu = 0.0009

    # number of iterations
    num_iter = 200

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14, 6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    if similarity_measure == reg.rigid_corr:
        txt = ax1.text(0.3,
                       0.95,
                       np.array2string(x, precision=5, floatmode='fixed'),
                       bbox={
                           'facecolor': 'white',
                           'alpha': 1,
                           'pad': 10
                       },
                       transform=ax1.transAxes)
    else:
        txt = ax1.text(-0.02,
                       1.02,
                       np.array2string(x, precision=5, floatmode='fixed'),
                       bbox={
                           'facecolor': 'white',
                           'alpha': 1,
                           'pad': 10
                       },
                       transform=ax1.transAxes)
    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_title("mu =" + str(mu))
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    # perform 'num_iter' gradient ascent updates
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x += g * mu

        # for visualization of the result
        S, Im_t, _ = fun(x)

        clear_output(wait=True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        display(fig)

    # save the figure
    # Currently optimized for speed. If you want to save an image every iteration, indent these lines into the loop above.
    fig.savefig(
        './data/image_results/3_2_t1 + 3_2_t1_d+ reg.rigid_corr + mu = ' +
        str(mu) + ' integer = ' + str(num_iter) + '.png')
Example #9
def intensity_based_registration_no_vis(I_path,
                                        Im_path,
                                        r_a_switch=0,
                                        corr_mi_switch=0):
    # This is an edited intensity-based registration function that does not
    # display any output; leaving out the visualization also makes it run a bit faster.
    #
    # Output: similarity, iterations, I, Im_t, x

    # r_a_switch: 0 --> rigid (default)
    #             1 --> affine
    assert r_a_switch == 0 or r_a_switch == 1, "Error: input parameter r_a_switch must be either 0 or 1.. "

    # corr_mi_switch: 0 --> correlation (default)
    #                 1 --> mutual information
    assert corr_mi_switch == 0 or corr_mi_switch == 1, "Error: input parameter corr_mi_switch must be either 0 or 1.. "

    # read the fixed and moving images
    # change these in order to read different images
    I = plt.imread(I_path)
    Im = plt.imread(Im_path)

    # initial values for the parameters
    if r_a_switch == 0:
        x = np.array([0., 0., 0.])  # rotation,transx,transy
    elif r_a_switch == 1:
        x = np.array([0., 1., 1., 0., 0., 0., 0.
                      ])  # rotation,scalex,scaley,shearx,sheary,transx,transy
    else:
        print("ERROR.. r_a_switch must be either 0 or 1")

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation

    if r_a_switch == 0:
        assert corr_mi_switch == 0, "Error, only correlation similarity possible with rigid registration"
        sim_fun = reg.rigid_corr
    elif r_a_switch == 1:
        if corr_mi_switch == 0:
            sim_fun = reg.affine_corr
        else:
            sim_fun = reg.affine_mi
    else:
        print("ERROR.. r_a_switch must be either 0 or 1")

    fun = lambda x: (sim_fun(I, Im, x))[0]
    fun_full = lambda x: sim_fun(I, Im, x)

    if corr_mi_switch == 0:
        # the initial learning rate
        mu = 0.005
        # number of iterations
        num_iter = 50
    else:
        # the initial learning rate
        mu = 0.003
        # number of iterations
        num_iter = 30

    # exponentially decaying learning rate: mu at the first iteration,
    # decaying to roughly mu/150 (a factor exp(-5)) at the final iteration
    fun_mu = lambda i: mu * np.exp(-5 * i / num_iter)

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    # perform 'num_iter' gradient ascent updates
    i = 0
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x += g * fun_mu(i)

        # evaluate the full similarity function to obtain S and the transformed image
        S, Im_t, _ = fun_full(x)

        # update 'learning' curve
        similarity[k] = S

        i = i + 1

    return similarity, iterations, I, Im_t, x
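Because this variant returns its results instead of displaying them, the similarity curve and the registered image can be plotted afterwards. A minimal usage sketch, with paths as in the demos above:

similarity, iterations, I, Im_t, x = intensity_based_registration_no_vis(
    '../data/image_data/1_1_t1.tif',
    '../data/image_data/1_1_t1_d.tif',
    r_a_switch=1, corr_mi_switch=1)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))
ax1.imshow(I)
ax1.imshow(Im_t, alpha=0.7)   # registered moving image overlaid on the fixed image
ax2.plot(iterations, similarity, lw=2)
ax2.set_xlabel('Iteration')
ax2.set_ylabel('Similarity')
ax2.grid()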
Example #10
def intensity_based_registration_demo(I,
                                      Im,
                                      mu=0.0005,
                                      num_iter=100,
                                      h=1e-3,
                                      x=np.array([0., 1., 1., 0., 0., 0., 0.]),
                                      type="affine",
                                      sim_meas="mi"):
    # read the fixed and moving images
    # change these in order to read different images
    # I = plt.imread('../data/image_data/1_1_t1.tif')
    # Im = plt.imread('../data/image_data/1_1_t2.tif')

    # initial values for the parameters
    # we start with the identity transformation
    # most likely you will not have to change these
    # x = np.array([0., 1., 1., 0., 0., 0., 0.])

    # NOTE: for affine registration you have to initialize
    # more parameters and the scaling parameters should be
    # initialized to 1 instead of 0

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation

    assert type.lower() in ["affine", "rigid"], "error: unknown type"
    assert sim_meas.lower() in ["mi",
                                "cc"], "error: unknown similarity measure"

    if type == "affine":
        if sim_meas == "mi":
            fun = lambda x: reg.affine_mi(I, Im, x)
        else:
            fun = lambda x: reg.affine_corr(I, Im, x)
    else:
        if sim_meas == "cc":
            fun = lambda x: reg.rigid_corr(I, Im, x)
        else:
            raise NotImplementedError(
                "no functionality for type=rigid and sim_meas=mi")

    # the learning rate
    # mu = 0.0005

    # number of iterations
    # num_iter = 100

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14, 6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(Im, alpha=0.5)
    # parameters
    txt = ax1.text(0.3,
                   0.95,
                   np.array2string(x, precision=5, floatmode='fixed'),
                   bbox={
                       'facecolor': 'white',
                       'alpha': 1,
                       'pad': 10
                   },
                   transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 2))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    path = []
    # perform 'num_iter' gradient ascent updates
    for k in np.arange(num_iter):
        # gradient ascent
        g = reg.ngradient(fun, x, h=h)
        x += g * mu
        path.append(x.copy())

        # for visualization of the result
        S, Im_t, _ = fun(x)

        clear_output(wait=True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        if k % int(num_iter / 10) == 0:
            print("biep, {:4.0%}...".format(k / num_iter))

    print("helemaal klaar dr mee!")

    fig.show()
    return path
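A usage sketch: the function returns the sequence of parameter vectors visited by gradient ascent, which can be stacked into an array and plotted. The image paths follow the commented-out defaults inside the function.

I = plt.imread('../data/image_data/1_1_t1.tif')
Im = plt.imread('../data/image_data/1_1_t2.tif')

path = intensity_based_registration_demo(I, Im, mu=0.0005, num_iter=100,
                                         type="affine", sim_meas="mi")

path = np.asarray(path)   # shape (num_iter, 7) for the affine parametrisation
plt.figure()
plt.plot(path)
plt.xlabel('Iteration')
plt.ylabel('Parameter value')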
Example #11
def intensity_based_registration_rigid_Corr_adapted(I, Im):

    # ADAPTED:
    # 1) Added 'fun2' with the original reg.rigid_corr(I, Im, x), because all
    #    three outputs (C, Im_t, Th) are needed for the visualization; the
    #    adapted rigid_corr ('fun') is used for the gradient computation.
    # 2) Changed "x += g*mu" to "x = np.add(x, g*mu)" because of a shape/dimension error.
    # 3) Flattened the x-array when used as input for fun2, because of a shape/dimension error.

    # the fixed and moving images are passed in as I and Im

    # initial values for the parameters
    # we start with the identity transformation
    # most likely you will not have to change these
    x = np.array([0., 0., 0.])

    # NOTE: for affine registration you have to initialize
    # more parameters and the scaling parameters should be
    # initialized to 1 instead of 0

    # the similarity function
    # this line of code in essence creates a version of rigid_corr()
    # in which the first two input parameters (fixed and moving image)
    # are fixed and the only remaining parameter is the vector x with the
    # parameters of the transformation
    fun = lambda x: reg_adapt.rigid_corr_adapted(I, Im, x)
    fun2 = lambda x: reg.rigid_corr(I, Im, x)

    # the learning rate
    mu = 0.001

    # number of iterations
    num_iter = 200

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14, 6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    txt = ax1.text(0.3,
                   0.95,
                   np.array2string(x, precision=5, floatmode='fixed'),
                   bbox={
                       'facecolor': 'white',
                       'alpha': 1,
                       'pad': 10
                   },
                   transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    # perform 'num_iter' gradient ascent updates
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x = np.add(x, g * mu)

        # for visualization of the result
        S, Im_t, _ = fun2(x.flatten())

        clear_output(wait=True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        display(fig)
Example #12
def intensity_based_registration(I, Im, Affine=True, CC=True):

    # initial values for the parameters
    # we start with the identity transformation
    # most likely you will not have to change these
    if Affine:
        x = np.array([0., 1., 1., 0., 0., 0., 0.])
        if CC:
            fun = lambda x: reg.affine_corr(I, Im, x)
        else:
            fun = lambda x: reg.affine_mi(I, Im, x)
    else:
        x = np.array([0., 0., 0.])
        fun = lambda x: reg.rigid_corr(I, Im, x)

    # the learning rate
    mu = 0.001

    # number of iterations
    num_iter = 200

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14, 6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    txt = ax1.text(0.3,
                   0.95,
                   np.array2string(x, precision=5, floatmode='fixed'),
                   bbox={
                       'facecolor': 'white',
                       'alpha': 1,
                       'pad': 10
                   },
                   transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    # perform 'num_iter' gradient ascent updates
    for k in np.arange(num_iter):

        # gradient ascent
        g = reg.ngradient(fun, x)
        x += g * mu

        # for visualization of the result
        S, Im_t, _ = fun(x)

        clear_output(wait=True)

        # update moving image and parameters
        im2.set_data(Im_t)
        txt.set_text(np.array2string(x, precision=5, floatmode='fixed'))

        # update 'learning' curve
        similarity[k] = S
        learning_curve.set_ydata(similarity)

        display(fig)
Example #13
text_str2 = 'iter.: {}, loss: {:.3f}, val. loss: {:.3f}'.format(0, 0, 0)
txt2 = ax2.text(0.3, 0.95, text_str2, bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}, transform=ax2.transAxes)

# iterate
for k in np.arange(num_iterations):
    
    # pick a batch at random
    idx = np.random.randint(r, size=batch_size)
    
    # the loss function for this particular batch
    loss_fun = lambda Theta: cad.lr_nll(util.addones(trainingX[idx,:]), trainingY[idx], Theta)
    
    # gradient descent:
    # here we reuse the code for numerical computation of the gradient
    # of a function
    Theta = Theta - mu*reg.ngradient(loss_fun, Theta)
    
    # compute the loss for the current model parameters for the
    # training and validation sets
    # note that the loss is divided with the number of samples so
    # it is comparable for different number of samples
    loss[k] = loss_fun(Theta)/batch_size
    validation_loss[k] = cad.lr_nll(validationXones, validationY, Theta)/r_val
    
    # update the visualization
    ph = cad.sigmoid(Xh_ones.dot(Theta)) > 0.5
    decision_map = ph.reshape(num_range_points, num_range_points)
    decision_map_trns = np.flipud(decision_map)
    im1.set_data(decision_map_trns)
    text_str1 = '{:.4f};  {:.4f};  {:.4f}'.format(Theta[0,0], Theta[1,0], Theta[2,0])
    txt1.set_text(text_str1)
Example #14
def intensity_based_registration(I,
                                 Im,
                                 mu=0.0005,
                                 num_iter=200,
                                 Affine=True,
                                 MI=True,
                                 Gradient=False):
    # Performs fully automatic intensity based image registration, providing
    # options for Rigid and Affine transformation,
    # options for Cross-correlation (CC) and Mutual Information (MI) as similarity metrics,
    # and options for Gradient ascent and Nelder-Mead as optimization algorithms.
    # Input:
    # I - Fixed image
    # Im - Moving image
    # mu - step size for Gradient ascent algorithm
    # num_iter - number of iterations for Gradient ascent algorithm
    # Affine - transformation model (default = affine)
    # MI - similarity metric (default = mutual information)
    # Gradient - optimization algorithm (default = Nelder-Mead, i.e. Gradient=False)
    # Output:
    # similarity - final similarity of the images
    # time_elapsed - time elapsed for image registration
    # Im_t - registered image

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8

    if Affine:
        x = np.array([0., 1., 1., 0., 0., 0., 0.])
        fun = lambda x: affine_corr(I, Im, x, MI)
    else:
        x = np.array([0., 0., 0.])
        fun = lambda x: rigid_corr(I, Im, x, MI)

    iterations = np.arange(1, num_iter + 1)
    similarity = np.full((num_iter, 1), np.nan)

    fig = plt.figure(figsize=(14, 6))

    # fixed and moving image, and parameters
    ax1 = fig.add_subplot(121)

    # fixed image
    im1 = ax1.imshow(I)
    # moving image
    im2 = ax1.imshow(I, alpha=0.7)
    # parameters
    txt1 = ax1.text(0.05,
                    0.95,
                    " ",
                    bbox={
                        'facecolor': 'white',
                        'alpha': 1,
                        'pad': 10
                    },
                    transform=ax1.transAxes)

    # 'learning' curve
    ax2 = fig.add_subplot(122, xlim=(0, num_iter), ylim=(0, 1))

    learning_curve, = ax2.plot(iterations, similarity, lw=2)
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Similarity')
    ax2.grid()

    txt2 = ax2.text(1.48,
                    0.18,
                    " ",
                    bbox={
                        'facecolor': 'white',
                        'alpha': 1,
                        'pad': 10
                    },
                    transform=ax1.transAxes)

    if Gradient:
        # perform 'num_iter' gradient ascent updates
        for k in np.arange(num_iter):

            # gradient ascent
            g = reg.ngradient(fun, x)
            x += g * mu

            #for visualization of the result

            S, Im_t, _ = fun(x)
            clear_output(wait=True)

            current_time = time.perf_counter() - start_time

            # update moving image and parameters
            im2.set_data(Im_t)
            txt1.set_text(np.array2string(x, precision=5, floatmode='fixed'))
            txt2.set_text("mu = " + str(mu) + ", num_iter = " + str(num_iter) +
                          ", time = " + str(round(current_time, 2)))

            # update 'learning' curve
            similarity[k] = S
            learning_curve.set_ydata(similarity)

            display(fig)

            time_elapsed = (time.perf_counter() - start_time)

        if Affine:
            if MI:
                print("MI = " + str(similarity[-1]) +
                      " for the affine intensity-based registration after " +
                      str(num_iter) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
            else:
                print("CC = " + str(similarity[-1]) +
                      " for the affine intensity-based registration after " +
                      str(num_iter) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
        else:
            if MI:
                print("MI = " + str(similarity[-1]) +
                      " for the rigid intensity-based registration after " +
                      str(num_iter) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
            else:
                print("CC = " + str(similarity[-1]) +
                      " for the rigid intensity-based registration after " +
                      str(num_iter) +
                      " iterations with a computation time of " +
                      str(time_elapsed))

        return similarity[-1], time_elapsed, Im_t

    else:
        x0 = x
        # Optimizing the parameters using nelder-mead
        if Affine:
            res = opt.minimize(lambda x: -optim_affine_corr(x, I, Im, MI),
                               x0,
                               method='nelder-mead',
                               options={
                                   'xtol': 1e-8,
                                   'disp': False
                               })
        else:
            res = opt.minimize(lambda x: -optim_rigid_corr(x, I, Im, MI),
                               x0,
                               method='nelder-mead',
                               options={
                                   'xtol': 1e-8,
                                   'disp': False
                               })

        x = res.x
        S, Im_t, _ = fun(x)
        clear_output(wait=True)

        current_time = time.perf_counter() - start_time

        # update moving image and parameters
        im2.set_data(Im_t)
        txt1.set_text(np.array2string(x, precision=5, floatmode='fixed'))
        txt2.set_text("mu = " + str(mu) + ", num_iter = " + str(res.nit) +
                      ", time = " + str(round(current_time, 2)))

        # update 'learning' curve
        similarity = S
        learning_curve.set_ydata(similarity)

        display(fig)

        time_elapsed = (time.perf_counter() - start_time)
        # print the performance of the function
        if Affine:
            if MI:
                print("MI = " + str(similarity) +
                      " for the affine intensity-based registration after " +
                      str(res.nit) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
            else:
                print("CC = " + str(similarity) +
                      " for the affine intensity-based registration after " +
                      str(res.nit) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
        else:
            if MI:
                print("MI = " + str(similarity) +
                      " for the rigid intensity-based registration after " +
                      str(res.nit) +
                      " iterations with a computation time of " +
                      str(time_elapsed))
            else:
                print("CC = " + str(similarity) +
                      " for the rigid intensity-based registration after " +
                      str(res.nit) +
                      " iterations with a computation time of " +
                      str(time_elapsed))

        return similarity, time_elapsed, Im_t
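A usage sketch comparing the two optimisers selected via the Gradient flag. Image loading follows the earlier demos; affine_corr, rigid_corr and the optim_* helpers called inside the function are assumed to be defined elsewhere in the same notebook.

I = plt.imread('../data/image_data/1_1_t1.tif')
Im = plt.imread('../data/image_data/1_1_t2.tif')

# affine registration with mutual information, optimised by gradient ascent
sim_ga, t_ga, Im_t_ga = intensity_based_registration(I, Im, mu=0.0005, num_iter=200,
                                                     Affine=True, MI=True, Gradient=True)

# the same registration problem optimised with Nelder-Mead (the default)
sim_nm, t_nm, Im_t_nm = intensity_based_registration(I, Im, Affine=True, MI=True)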