Example #1
def train(img_x, img_y, patch_size, patch_step, dim_img, nb_filters, nb_conv, batch_size, nb_epoch):
    """
    Function description.

    Parameters
    ----------
    parameter_01 : type
        Description.

    parameter_02 : type
        Description.

    parameter_03 : type
        Description.

    Returns
    -------
    return_01
        Description.
    """

    img_x = utils.nor_data(img_x)
    img_y = utils.nor_data(img_y)
    img_input = utils.extract_patches(img_x, patch_size, patch_step)
    img_output = utils.extract_patches(img_y, patch_size, patch_step)
    img_input = np.reshape(img_input, (len(img_input), 1, dim_img, dim_img))
    img_output = np.reshape(img_output, (len(img_output), 1, dim_img, dim_img))

    mdl = model(dim_img, nb_filters, nb_conv)
    mdl.fit(img_input, img_output, batch_size=batch_size, nb_epoch=nb_epoch)
    return mdl
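
A minimal usage sketch, assuming train() and its utils/model helpers are importable and that the TIFF file names are hypothetical stand-ins for a training pair:

import dxchange  # TIFF I/O, as used in the other examples

# Hypothetical training pair: a raw input image and its target output.
img_x = dxchange.read_tiff('input.tiff')
img_y = dxchange.read_tiff('target.tiff')

# dim_img must equal the patch edge length so the reshape above succeeds.
mdl = train(img_x, img_y, patch_size=(32, 32), patch_step=4,
            dim_img=32, nb_filters=32, nb_conv=3,
            batch_size=1000, nb_epoch=10)
mdl.save_weights('transform_weights.h5')  # reuse with predict() below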
Example #2
def seg_train(img_x, img_y, patch_size=32,
              patch_step=1, nb_conv=32, size_conv=3,
              batch_size=1000, nb_epoch=20, nb_down=2, nb_gpu=1):
    """
    Function description.
    Parameters
    ----------
    img_x: array, 2D or 3D
        Training input of the model. It is the raw image for the segmentation.
    img_y: array, 2D or 3D
        Training output of the model. It is the corresponding segmentation of the training input.
    patch_size: int
        The size of the small patches extracted from the input images. This size should be big enough to cover the
        features of the segmentation object.
    patch_step: int
         The pixel steps between neighbour patches. Larger steps leads faster speed, but less quality. I recommend 1
         unless you need quick test of the algorithm.
    nb_conv: int
          Number of the covolutional kernals for the first layer. This number doubles after each downsampling layer.
    size_conv: int
          Size of the convolutional kernals.
    batch_size: int
          Batch size for the training. Bigger size leads faster speed. However, it is restricted by the memory size of
          the GPU. If the user got the memory error, please decrease the batch size.
    nb_epoch: int
          Number of the epoches for the training. It can be understand as the number of iterations during the training.
          Please define this number as the actual convergence for different data.
    nb_down: int
          Number of the downsampling for the images in the model.
    nb_gpu: int
          Number of GPUs you want to use for the training.
    Returns
    -------
    mdl
        The trained CNN model for segmenation. The model can be saved for future segmentations.
    """
    patch_shape = (patch_size, patch_size)
    img_x = nor_data(img_x)
    img_y = nor_data(img_y)

    train_x = extract_3d(img_x, patch_shape, patch_step)
    train_y = extract_3d(img_y, patch_shape, patch_step)
    train_x = np.reshape(train_x, (len(train_x), patch_size, patch_size, 1))
    train_y = np.reshape(train_y, (len(train_y), patch_size, patch_size, 1))
    mdl = model_choose(patch_size, patch_size, nb_conv, size_conv, nb_down, nb_gpu)
    print(mdl.summary())
    mdl.fit(train_x, train_y, batch_size=batch_size, epochs=nb_epoch)
    return mdl
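
A minimal usage sketch for seg_train, with hypothetical file names for a raw image and its manual segmentation:

import dxchange

img_x = dxchange.read_tiff('raw.tiff')         # raw image
img_y = dxchange.read_tiff('raw_labels.tiff')  # its segmentation
mdl = seg_train(img_x, img_y, patch_size=32, patch_step=4,
                batch_size=1000, nb_epoch=20)
mdl.save_weights('seg_weights.h5')             # reload with seg_predict()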
Example #3
def predict(mdl, img, patch_size, patch_step, batch_size, dim_img):
    """
    the cnn model for image transformation


    Parameters
    ----------
    img : array
        The image need to be calculated

    patch_size : (int, int)
        The patches dimension

    dim_img : int
        The input image dimension

    Returns
    -------
    img_rec
        Description.

      """
    img = np.float16(utils.nor_data(img))
    img_y, img_x = img.shape
    x_img = utils.extract_patches(img, patch_size, patch_step)
    x_img = np.reshape(x_img, (len(x_img), 1, dim_img, dim_img))
    y_img = mdl.predict(x_img, batch_size=batch_size)
    del x_img
    y_img = np.reshape(y_img, (len(y_img), dim_img, dim_img))
    img_rec = utils.reconstruct_patches(y_img, (img_y, img_x), patch_step)
    return img_rec
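
A sketch of how this channels-first predict might be called, assuming mdl is the model trained in Example #1 and the file name is hypothetical:

import dxchange

img = dxchange.read_tiff('input.tiff')  # hypothetical image to transform
img_rec = predict(mdl, img, patch_size=(32, 32), patch_step=4,
                  batch_size=1000, dim_img=32)
dxchange.write_tiff(img_rec, 'restored', dtype='float32')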
Example #4
def predict(mdl, img, patch_size, patch_step, batch_size, dim_img):
    """
    the cnn model for image transformation


    Parameters
    ----------
    img : array
        The image need to be calculated

    patch_size : (int, int)
        The patches dimension

    dim_img : int
        The input image dimension

    Returns
    -------
    img_rec
        Description.

      """
    img = np.float16(utils.nor_data(img))
    img_h, img_w = img.shape
    input_img = utils.extract_patches(img, patch_size, patch_step)
    input_img = np.reshape(input_img, (input_img.shape[0], dim_img, dim_img, 1))
    output_img = mdl.predict(input_img, batch_size=batch_size)
    del input_img
    output_img = np.reshape(output_img, (output_img.shape[0], dim_img, dim_img))
    img_rec = utils.reconstruct_patches(output_img, (img_h, img_w), patch_step)
    return img_rec
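
The only substantive difference from the previous predict variant is the patch layout: here patches are reshaped channels-last, (N, dim_img, dim_img, 1), TensorFlow's default image_data_format, while the earlier version uses the older channels-first Theano-style layout, (N, 1, dim_img, dim_img). The reshape must match the data format the model was built with.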
Example #5
mdl.load_weights('weight_center.h5')
print('The model loading time is %s seconds'%(time.time()-start_time))

Y_score = np.zeros((50, 501))
for i in range(50):
    slice_num = (i+2)*20
    datapath = '/home/oxygen/YANGX/Globus/center/test_04/slice'+str(slice_num)+'/*.tiff'
    fnames = glob.glob(datapath)
    fnames = np.sort(fnames)

    for j in range(len(fnames)):
        img = dxchange.read_tiff(fnames[j])
        img = -nor_data(img)
        # Extract overlapping 128x128 patches from the central window of the slice.
        X_evl = extract_patches(img[360:1660, 440:1640],
                                (128, 128), step=64, max_patches=None, random_state=None)
        X_evl = X_evl.reshape(X_evl.shape[0], 1, dim_img, dim_img)
        Y_evl = mdl.predict(X_evl, batch_size=batch_size)

        # Sum the predicted probability of the "well-centered" class over all patches.
        Y_score[i, j] = sum(np.dot(Y_evl, [0, 1]))

    ind_max = np.argmax(Y_score[i, :])
    print('The well-centered reconstruction is:', fnames[ind_max])
Example #6
def rec_dcgan_back(prj, ang, save_wpath, init_wpath=None, **kwargs):
    tf.reset_default_graph()
    cnn_kwargs = [
        'learning_rate', 'num_steps', 'display_step', 'conv_nb', 'conv_size',
        'dropout', 'weights_init', 'method', 'cost_rate'
    ]
    kwargs_defaults = _get_tomolearn_kwargs()
    for kw in cnn_kwargs:
        kwargs.setdefault(kw, kwargs_defaults[kw])
    if init_wpath:
        kwargs['weights_init'] = True

    _, nang, px, _ = prj.shape
    prj = nor_data(prj)
    img_input = tf.placeholder(tf.float32, prj.shape)
    img_output = tf.placeholder(tf.float32, prj.shape)

    pred, recon = tomo_learn(img_input,
                             ang,
                             px,
                             reuse=False,
                             conv_nb=kwargs['conv_nb'],
                             conv_size=kwargs['conv_size'],
                             dropout=kwargs['dropout'],
                             method=kwargs['method'])
    disc_real = discriminator(img_output)
    disc_fake = discriminator(pred, reuse=True)

    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,
                                                                      labels=tf.ones_like(disc_fake))) \
               + tf.reduce_mean(tf.abs(img_output-pred))*kwargs['cost_rate']

    disc_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    disc_loss = disc_loss_real + disc_loss_fake

    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope='generator')
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope='discriminator')

    optimizer_gen = tf.train.AdamOptimizer(
        learning_rate=kwargs['learning_rate'])
    optimizer_disc = tf.train.AdamOptimizer(
        learning_rate=kwargs['learning_rate'])

    train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
    train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)

    fig, axs = plt.subplots(2, 1, figsize=(8, 16))
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        if kwargs['weights_init']:
            if init_wpath is None:
                raise ValueError('Please provide the file name of initial weights.')
            saver.restore(sess, init_wpath)
        for step in range(1, kwargs['num_steps'] + 1):

            with tf.device('/device:GPU:1'):
                dl, _ = sess.run([disc_loss, train_disc],
                                 feed_dict={
                                     img_input: prj,
                                     img_output: prj
                                 })
            with tf.device('/device:GPU:2'):
                gl, _ = sess.run([gen_loss, train_gen],
                                 feed_dict={
                                     img_input: prj,
                                     img_output: prj
                                 })

            if step % kwargs['display_step'] == 0 or step == 1:
                pred, recon = sess.run(
                    tomo_learn(prj,
                               ang,
                               px,
                               reuse=True,
                               conv_nb=kwargs['conv_nb'],
                               conv_size=kwargs['conv_size'],
                               dropout=kwargs['dropout'],
                               method=kwargs['method']))
                sino_plt = np.reshape(pred, (nang, px))
                rec_plt = np.reshape(recon, (px, px))
                ax = axs[0]
                ax.imshow(sino_plt, vmax=1, cmap='jet')
                plt.axis('off')
                ax = axs[1]
                ax.imshow(rec_plt, vmax=1, cmap='jet')
                plt.axis('off')
                plt.pause(0.1)

                print("Step " + str(step) + ", Generator Loss= " +
                      "{:.7f}".format(gl) + ', Discriminator loss = ' +
                      "{:.7f}".format(dl))
        plt.close(fig)
        saver.save(sess, save_wpath)
    return recon
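
A minimal usage sketch, assuming prj is a projection/sinogram array of shape (1, nang, px, 1) and ang holds the projection angles; the .npy file name is a hypothetical placeholder:

import numpy as np

nang, px = 180, 256                            # hypothetical geometry
ang = np.linspace(0, np.pi, nang, dtype=np.float32)
prj = np.load('sinogram.npy').reshape(1, nang, px, 1)

recon = rec_dcgan_back(prj, ang, save_wpath='weights/rec_dcgan',
                       num_steps=2000, display_step=100)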
Example #7
def phase_dcgan(ifp, h, save_wpath, init_wpath=None, **kwargs):
    ops.reset_default_graph()
    tf.compat.v1.disable_eager_execution()
    cnn_kwargs = ['pure_phase', 'learning_rate_g', 'learning_rate_d', 'num_steps', 'display_step', 'conv_nb',
                  'conv_size', 'dropout', 'weights_init', 'cost_rate', 'gl_tol', 'iter_plot']
    kwargs_defaults = _get_phaselearn_kwargs()
    for kw in cnn_kwargs:
        kwargs.setdefault(kw, kwargs_defaults[kw])
    if init_wpath:
        kwargs['weights_init'] = True

    _, px, px, _ = ifp.shape
    img_input = tf.compat.v1.placeholder(tf.float32, ifp.shape)
    img_output = tf.compat.v1.placeholder(tf.float32, ifp.shape)
    ifp = nor_data(ifp)
    pred, phase, absorption = phase_learn(ifp, h, px, reuse=False,
                                          pure_phase=kwargs['pure_phase'],
                                          conv_nb=kwargs['conv_nb'],
                                          conv_size=kwargs['conv_size'],
                                          dropout=kwargs['dropout'])
    disc_real = discriminator(img_output)
    disc_fake = discriminator(pred, reuse=True)

    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,
                                                                      labels=tf.ones_like(disc_fake))) \
               + tf.reduce_mean(tf.abs(img_output - pred)) * kwargs['cost_rate']

    disc_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,
                                                                            labels=tf.ones_like(disc_real)))
    disc_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,
                                                                            labels=tf.zeros_like(disc_fake)))
    disc_loss = disc_loss_real + disc_loss_fake

    gen_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    disc_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')

    optimizer_gen = tf.compat.v1.train.AdamOptimizer(learning_rate=kwargs['learning_rate_g'])
    optimizer_disc = tf.compat.v1.train.AdamOptimizer(learning_rate=kwargs['learning_rate_d'])

    train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
    train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)


    ######################################################################
    # plots for debug
    if kwargs['iter_plot']:
        fig, axs = plt.subplots(2, 3, figsize=(16, 8))
        im0 = axs[0, 0].imshow(ifp.reshape(px, px), cmap='jet')
        tx0 = axs[0, 0].set_title('Intensity')
        fig.colorbar(im0, ax=axs[0, 0])
        tx1 = axs[1, 0].set_title('Absolute error of the intensity for iteration 0')
        im1 = axs[1, 0].imshow(ifp.reshape(px, px), cmap='jet')
        fig.colorbar(im1, ax=axs[1, 0])
        im2 = axs[0, 1].imshow(np.zeros((px, px)), cmap='gray')
        fig.colorbar(im2, ax=axs[0, 1])
        tx2 = axs[0, 1].set_title('Phase')
        im3 = axs[0, 2].imshow(np.zeros((px, px)), cmap='jet')
        fig.colorbar(im3, ax=axs[0, 2])
        tx3 = axs[0, 2].set_title('Absorption')
        xdata, g_loss = [], []
        im4, = axs[1, 1].plot(xdata, g_loss, 'r-')
        axs[1, 1].set_yscale('log')
        tx4 = axs[1, 1].set_title('Generator loss')
        plt.tight_layout()
#########################################################################

    rec_tmp = tf.zeros([1, px, px, 1])


    init = tf.compat.v1.global_variables_initializer()
    saver = tf.compat.v1.train.Saver()

    with tf.compat.v1.Session() as sess:
        # Run the initializer
        sess.run(init)
        if kwargs['weights_init']:
            if init_wpath is None:
                raise ValueError('Please provide the file name of initial weights.')
            saver.restore(sess, init_wpath)
        for step in range(1, kwargs['num_steps'] + 1):

            with tf.device('/device:GPU:1'):
                dl, _ = sess.run([disc_loss, train_disc],
                                 feed_dict={img_input: ifp, img_output: ifp})
            with tf.device('/device:GPU:2'):
                gl, _ = sess.run([gen_loss, train_gen], feed_dict={img_input: ifp, img_output: ifp})
            xdata.append(step)
            g_loss.append(gl)

            # Restart from fresh weights if the generator loss diverges to NaN.
            if np.isnan(gl):
                sess.run(init)

            if step % kwargs['display_step'] == 0 or step == 1:
                pred, phase, absorption = sess.run(phase_learn(ifp, h, px, reuse=False,
                                                   pure_phase=kwargs['pure_phase'],
                                                   conv_nb=kwargs['conv_nb'],
                                                   conv_size=kwargs['conv_size'],
                                                   dropout=kwargs['dropout']))
                ifp_plt = np.reshape(pred, (px, px))
                ifp_plt = np.abs(ifp_plt - ifp.reshape((px, px)))
                rec_plt = np.reshape(phase, (px, px))
                abs_plt = np.reshape(absorption, (px, px))
                tx1.set_text('Absolute error of the intensity for iteration {0}'.format(step))
                vmax = np.max(ifp_plt)
                vmin = np.min(ifp_plt)
                im1.set_data(ifp_plt)
                im1.set_clim(vmin, vmax)
                im2.set_data(rec_plt)
                vmax = np.max(rec_plt)
                vmin = np.min(rec_plt)
                im2.set_clim(vmin, vmax)
                im3.set_data(abs_plt)
                vmax = np.max(abs_plt)
                vmin = np.min(abs_plt)
                im3.set_clim(vmin, vmax)
                axs[1, 1].plot(xdata, g_loss, 'r-')
                plt.pause(0.1)

                print("Step " + str(step) + ", Generator Loss= " + "{:.7f}".format(gl) +
                      ', Discriminator loss = ' + "{:.7f}".format(dl))
            # Stop early once the generator loss drops below the tolerance.
            if gl < kwargs['gl_tol']:
                _, phase, absorption = sess.run(phase_learn(ifp, h, px, reuse=False,
                                                            pure_phase=kwargs['pure_phase'],
                                                            conv_nb=kwargs['conv_nb'],
                                                            conv_size=kwargs['conv_size'],
                                                            dropout=kwargs['dropout']))
                break
            # Collect the phase from the last 10 steps and average them below.
            if step > (kwargs['num_steps'] - 10):
                _, phase, absorption = sess.run(phase_learn(ifp, h, px, reuse=False,
                                                            pure_phase=kwargs['pure_phase'],
                                                            conv_nb=kwargs['conv_nb'],
                                                            conv_size=kwargs['conv_size'],
                                                            dropout=kwargs['dropout']))
                rec_tmp = tf.concat([rec_tmp, phase], axis=0)
        plt.close(fig)
        saver.save(sess, save_wpath)
        if rec_tmp.shape[0] > 1:
            phase = tf.reduce_mean(rec_tmp, axis=0).eval()
    return phase, absorption
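
A minimal usage sketch, assuming ifp is a measured intensity of shape (1, px, px, 1) and h is the propagation kernel expected by phase_learn; both .npy names are hypothetical:

import numpy as np

px = 256
ifp = np.load('intensity.npy').reshape(1, px, px, 1)  # measured intensity
h = np.load('propagator.npy')                         # kernel for phase_learn

phase, absorption = phase_dcgan(ifp, h, save_wpath='weights/phase_dcgan',
                                num_steps=5000, display_step=200)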
Example #8
nb_epoch = 12

# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

fname = '../../test/test_data/1038.tiff'
ind_uncenter1 = range(1038, 1047)
ind_uncenter2 = range(1049, 1057)
uncenter1 = dxchange.read_tiff_stack(fname, ind=ind_uncenter1, digit=4)
uncenter2 = dxchange.read_tiff_stack(fname, ind=ind_uncenter2, digit=4)
uncenter = np.concatenate((uncenter1, uncenter2), axis=0)
uncenter = nor_data(uncenter)
print(uncenter.shape)
uncenter = img_window(uncenter[:, 360:1460, 440:1440], 200)
print(uncenter.shape)
uncenter_patches = extract_3d(uncenter, patch_size, 1)
np.random.shuffle(uncenter_patches)
print(uncenter_patches.shape)
center_img = dxchange.read_tiff('../../test/test_data/1048.tiff')
center_img = nor_data(center_img)
print(center_img.shape)
center_img = img_window(center_img[360:1460, 440:1440], 400)
center_patches = extract_3d(center_img, patch_size, 1)
np.random.shuffle(center_patches)
print(center_patches.shape)
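
These two shuffled patch sets, off-center and well-centered, presumably become the two classes for training the center-finding classifier that is evaluated in Example #5 and Example #12.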
Example #9
def seg_predict(img, wpath, patch_size=32, patch_step=1,
                nb_conv=32, size_conv=3,
                batch_size=1000, nb_down=2, nb_gpu=1):
    """
    Segment images with a trained model and return the result.

    Parameters
    ----------
    img : array
        The images to be segmented.
    wpath: string
        The path from which the trained weights of the model are read.
    patch_size: int
        The size of the small patches extracted from the input images. This size should be big enough to cover the
        features of the segmentation object.
    patch_step: int
        The pixel step between neighboring patches. Larger steps lead to faster speed but lower quality. A step of 1
        is recommended unless you only need a quick test of the algorithm.
    nb_conv: int
        Number of convolutional kernels for the first layer. This number doubles after each downsampling layer.
    size_conv: int
        Size of the convolutional kernels.
    batch_size: int
        Batch size for the prediction. A bigger batch size leads to faster speed, but it is restricted by the memory
        size of the GPU. If you run into GPU memory errors, decrease the batch size.
    nb_down: int
        Number of downsampling steps applied to the images in the model.
    nb_gpu: int
        Number of GPUs to use for the prediction.

    Returns
    -------
    array
        The segmented image (2D input) or an object array of segmented images (3D input).
    """
    patch_shape = (patch_size, patch_size)
    img = np.float32(nor_data(img))
    mdl = model_choose(patch_size, patch_size, nb_conv, size_conv, nb_down, nb_gpu)
    # print(mdl.summary())
    mdl.load_weights(wpath)
    if img.ndim == 2:
        ih, iw = img.shape
        predict_x = extract_3d(img, patch_shape, patch_step)
        predict_x = np.reshape(predict_x, (predict_x.shape[0], patch_size, patch_size, 1))
        predict_y = mdl.predict(predict_x, batch_size=batch_size)
        predict_y = np.reshape(predict_y, (predict_y.shape[0], patch_size, patch_size))
        predict_y = reconstruct_patches(predict_y, (ih, iw), patch_step)
        return predict_y

    else:
        pn, ih, iw = img.shape
        images = np.empty(pn, dtype=object)  # object array holding one segmented image per slice
        for i in range(pn):
            print('Processing the %s th image' % i)
            tstart = time.time()
            predict_x = img[i]
            predict_x = extract_3d(predict_x, patch_shape, patch_step)
            predict_x = np.reshape(predict_x, (len(predict_x), patch_size, patch_size, 1))
            predict_y = mdl.predict(predict_x, batch_size=batch_size)
            predict_y = np.reshape(predict_y, (len(predict_y), patch_size, patch_size))
            predict_y = reconstruct_patches(predict_y, (ih, iw), patch_step)
            images[i] = predict_y
        return images
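
A minimal usage sketch, reusing the hypothetical weights file saved by seg_train above:

import dxchange

img = dxchange.read_tiff('raw.tiff')          # hypothetical input
seg = seg_predict(img, 'seg_weights.h5', patch_size=32, patch_step=4)
dxchange.write_tiff(seg, 'raw_seg', dtype='float32')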
Example #10
def seg_train(img_x, img_y, **kwargs):
    """
    Function description.

    Parameters
    ----------
    img_x: array, 2D or 3D
        Training input of the model. It is the raw image for the segmentation.

    img_y: array, 2D or 3D
        Training output of the model. It is the corresponding segmentation of the training input.

    patch_size: int
        The size of the small patches extracted from the input images. This size should be big enough to cover the
        features of the segmentation object.

    patch_step: int
         The pixel steps between neighbour patches. Larger steps leads faster speed, but less quality. I recommend 1
         unless you need quick test of the algorithm.

    conv_nb: int
          Number of the covolutional kernals for the first layer. This number doubles after each downsampling layer.

    conv_size: int
          Size of the convolutional kernals.

    batch_size: int
          Batch size for the training. Bigger size leads faster speed. However, it is restricted by the memory size of
          the GPU. If the user got the memory error, please decrease the batch size.

    nb_epoch: int
          Number of the epoches for the training. It can be understand as the number of iterations during the training.
          Please define this number as the actual convergence for different data.

    model_layers: int
          Number of the downsampling for the images in the model.

    gpu_nb: int
          Number of GPUs you want to use for the training.


    Returns
    -------
    mdl
        The trained CNN model for segmenation. The model can be saved for future segmentations.
    """
    seg_kwargs = [
        'patch_size', 'patch_step', 'conv_nb', 'conv_size', 'batch_size',
        'epoch_nb', 'model_layers', 'gpu_nb'
    ]
    kwargs_defaults = _get_seg_kwargs()
    for kw in seg_kwargs:
        kwargs.setdefault(kw, kwargs_defaults[kw])
    patch_shape = (kwargs['patch_size'], kwargs['patch_size'])
    img_x = nor_data(img_x)
    img_y = nor_data(img_y)

    train_x = extract_3d(img_x, patch_shape, kwargs['patch_step'])
    train_y = extract_3d(img_y, patch_shape, kwargs['patch_step'])
    train_x = np.reshape(
        train_x, (len(train_x), kwargs['patch_size'], kwargs['patch_size'], 1))
    train_y = np.reshape(
        train_y, (len(train_y), kwargs['patch_size'], kwargs['patch_size'], 1))
    mdl = model_choose(kwargs['patch_size'], kwargs['patch_size'],
                       kwargs['conv_nb'], kwargs['conv_size'],
                       kwargs['model_layers'], kwargs['gpu_nb'])
    print(mdl.summary())
    mdl.fit(train_x,
            train_y,
            batch_size=kwargs['batch_size'],
            epochs=kwargs['epoch_nb'])
    return mdl
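
The keyword-argument variant can be called the same way as Example #2; unspecified options fall back to the _get_seg_kwargs() defaults. File names are hypothetical:

import dxchange

img_x = dxchange.read_tiff('raw.tiff')
img_y = dxchange.read_tiff('raw_labels.tiff')
mdl = seg_train(img_x, img_y, patch_size=32, patch_step=4, epoch_nb=20)
mdl.save_weights('seg_weights.h5')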
Example #11
def seg_predict(img, wpath, spath, **kwargs):
    """
    Function description

    Parameters
    ----------
    img : array
        The images need to be segmented.

    wpath: string
        The path where the trained weights of the model can be read.

    spath: string
        The path to save the segmented images.

    patch_size: int
        The size of the small patches extracted from the input images. This size should be big enough to cover the
        features of the segmentation object.

    patch_step: int
         The pixel steps between neighbour patches. Larger steps leads faster speed, but less quality. I recommend 1
         unless you need quick test of the algorithm.

    conv_nb: int
          Number of the covolutional kernals for the first layer. This number doubles after each downsampling layer.

    conv_size: int
          Size of the convolutional kernals.

    batch_size: int
          Batch size for the training. Bigger size leads faster speed. However, it is restricted by the memory size of
          the GPU. If the user got the memory error, please decrease the batch size.

    nb_epoch: int
          Number of the epoches for the training. It can be understand as the number of iterations during the training.
          Please define this number as the actual convergence for different data.

    model_layers: int
          Number of the downsampling for the images in the model.

    gpu_nb: int
          Number of GPUs you want to use for the training.

    Returns
    -------
    save the segmented images to the spath.

      """
    seg_kwargs = [
        'patch_size', 'patch_step', 'conv_nb', 'conv_size', 'batch_size',
        'epoch_nb', 'model_layers', 'gpu_nb'
    ]
    kwargs_defaults = _get_seg_kwargs()
    for kw in seg_kwargs:
        kwargs.setdefault(kw, kwargs_defaults[kw])
    patch_shape = (kwargs['patch_size'], kwargs['patch_size'])
    img = np.float32(nor_data(img))
    mdl = model_choose(kwargs['patch_size'], kwargs['patch_size'],
                       kwargs['conv_nb'], kwargs['conv_size'],
                       kwargs['model_layers'], kwargs['gpu_nb'])
    # print(mdl.summary())
    mdl.load_weights(wpath)
    if img.ndim == 2:
        ih, iw = img.shape
        predict_y = pred_single(mdl, img, ih, iw, patch_shape,
                                kwargs['patch_step'], kwargs['patch_size'],
                                kwargs['batch_size'])
        fname = spath + 'seg'
        dxchange.write_tiff(predict_y, fname, dtype='float32')
    else:
        pn, ih, iw = img.shape
        for i in range(pn):
            print('Processing the %s th image' % i)
            tstart = time.time()
            predict_x = img[i]
            predict_y = pred_single(mdl, predict_x, ih, iw, patch_shape,
                                    kwargs['patch_step'], kwargs['patch_size'],
                                    kwargs['batch_size'])
            predict_y = np.float32(predict_y)
            fname = spath + 'seg' + "-%03d" % (i)
            dxchange.write_tiff(predict_y, fname, dtype='float32')
            print('The prediction runs for %s seconds' %
                  (time.time() - tstart))
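
A minimal usage sketch; note that spath is used as a file-name prefix (fname = spath + 'seg'), so a trailing slash writes into a directory. Names are hypothetical:

import dxchange

img = dxchange.read_tiff('raw_stack.tiff')  # hypothetical 2D or 3D input
seg_predict(img, 'seg_weights.h5', 'results/', patch_size=32, patch_step=4)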
Example #12
nb_evl = 100

start_time = time.time()
fnames = glob.glob('../../test/test_data/*.tiff')
fnames = np.sort(fnames)

mdl = model(dim_img, nb_filters, nb_conv, nb_classes)

mdl.load_weights('classify_training_weights.h5')
print('The model loading time is %s seconds' % (time.time() - start_time))
start_time = time.time()
Y_score = np.zeros((len(fnames)))

for i in range(len(fnames)):
    img = dxchange.read_tiff(fnames[i])
    img = nor_data(img)
    X_evl = np.zeros((nb_evl, dim_img, dim_img))

    for j in range(nb_evl):
        X_evl[j] = img_window(img[360:1460, 440:1440], dim_img)
    X_evl = X_evl.reshape(X_evl.shape[0], 1, dim_img, dim_img)
    Y_evl = mdl.predict(X_evl, batch_size=batch_size)
    # Sum the predicted probability of the "well-centered" class over all windows.
    Y_score[i] = sum(np.dot(Y_evl, [0, 1]))

ind_max = np.argmax(Y_score)
print('The well-centered reconstruction is:', fnames[ind_max])
print('The prediction runs for %s seconds' % (time.time() - start_time))
plt.plot(Y_score)
plt.show()