Code example #1
def write_test_volumes(test_proj, test_label):

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # compute volumes before training and export dennerlein
        proj_filename = os.path.splitext(
            os.path.splitext(os.path.basename(test_proj))[0])[0]
        out_fa = LOG_DIR + ('test_%s_%s_fa.bin' %
                            (proj_filename, WEIGHTS_TYPE))
        out_la = LOG_DIR + ('test_%s_%s_la.bin' %
                            (proj_filename, WEIGHTS_TYPE))

        vol_before = m.test_vol
        write_dennerlein = dennerlein.write(out_la, vol_before)
        write_dennerlein_label = dennerlein.write(out_fa, m.test_label)
        sess.run([write_dennerlein, write_dennerlein_label])

        coord.request_stop()
        coord.join(threads)
        sess.close()

    tf.reset_default_graph()
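
A minimal usage sketch for write_test_volumes, assuming the surrounding module already provides LOG_DIR, WEIGHTS_TYPE, GPU_OPTIONS, Model and the dennerlein helpers; the projection and label paths below are hypothetical placeholders, not files from the original project.

# Hypothetical invocation; file paths are placeholders.
test_proj = '/data/projections/patient_07.proj.bin'
test_label = '/data/labels/patient_07.vol.bin'

# Writes two Dennerlein volumes into LOG_DIR:
#   test_patient_07_<WEIGHTS_TYPE>_la.bin  (reconstruction before training)
#   test_patient_07_<WEIGHTS_TYPE>_fa.bin  (reference/label volume)
write_test_volumes(test_proj, test_label)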
Code example #2
def test_model(validation_proj, validation_label, test_proj, test_label,
               save_path, log_dir, i):
    step = 0
    losses = []

    print("********* Iteracao: " + str(i) + "\n\n")

    # find checkpoint files
    checkpoints = []
    with open(save_path + 'checkpoint') as f:
        lines = f.readlines()
        pattern = re.compile(r'all_model_checkpoint_paths:\s"(.+)"')
        for line in lines:
            for match in re.finditer(pattern, line):
                checkpoints.append(match.groups()[0])

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        #        write_png = png.writeSlice( m.test_label[0], log_dir + 'slice_label_'+str(i)+'.png' )
        #        sess.run(write_png)

        # compute volume before training and export central slice
        print('Computing volume ' + str(i) + ' without trained parameters')
        vol_before = m.test_vol
        #        write_png = png.writeSlice( vol_before[CONF.proj_shape.N // 2], log_dir + 'slice_before_training.png' )
        write_png = png.writeSlice(
            vol_before[31],
            log_dir + 'slice_before_training_' + str(i) + '.png')
        #        write_png_label = png.writeSlice( m.test_label[CONF.proj_shape.N // 2], log_dir + 'slice_label.png' )
        #        write_png_label = png.writeSlice( m.test_label[0], log_dir + 'slice_label.png' )

        #        vol_before_np, _, _, vol_label_np = sess.run( [vol_before, write_png, write_png_label, m.test_label] )
        vol_before_np, _ = sess.run([vol_before, write_png])

        # find best checkpoint
        #        print( 'Searching best checkpoint' )
        saver = tf.train.Saver()
        m.setTest(validation_proj, validation_label, sess)

        best_cp_i = 51
        best_cp_loss = math.inf

        #print("Hard Code for the best checkpoint 1102")

        print("\n\n**********************************")
        #print(checkpoints)
        print(checkpoints[best_cp_i])
        """
        for i, cp in enumerate( checkpoints ):
#        for i in range(300, 400):
            saver.restore( sess, checkpoints[i] )
            loss = sess.run( m.test_loss )
            print("Modelo: ")
            print(i)
            print("Loss: ")
            print(loss)

            if loss < best_cp_loss:
                best_cp_i = i
                best_cp_loss = loss
            print( '.', end = '', flush = True )
        """

        print('')
        print("Terminando de testar os modelos ********\n\n")

        print("Melhor loss: ")
        print(best_cp_i)
        saver.restore(sess, checkpoints[best_cp_i])
        #print("Loss do modelo " + str(best_cp_i))
        #loss = sess.run( m.test_loss )
        #print(loss)

        # load best model and set test volume
        print('Computing volume ' + str(i) + ' with trained parameters')
        m.setTest([test_proj], [test_label], sess)
        saver.restore(sess, checkpoints[best_cp_i])

        # compute volume after training and export central slice + dennerlein
        vol_after = m.test_vol
        #write_png = png.writeSlice( vol_after[CONF.proj_shape.N // 2], log_dir + 'slice_after_training.png' )
        write_png = png.writeSlice(
            vol_after[31], log_dir + 'slice_after_training_' + str(i) + '.png')
        write_dennerlein = dennerlein.write(
            log_dir + 'after_training_' + str(i) + '.bin', vol_after)
        vol_after_np, _, _ = sess.run([vol_after, write_png, write_dennerlein])

        coord.request_stop()
        coord.join(threads)
        sess.close()

    tf.reset_default_graph()

    return losses, step
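
A sketch of how this per-fold variant of test_model might be driven from an outer loop; the projection/label lists and directory layout are hypothetical, and the (losses, step) return values are kept only for symmetry with the original signature.

# Hypothetical driver; lists and directories below are placeholders.
for i, (test_proj, test_label) in enumerate(zip(test_proj_files, test_label_files)):
    losses, step = test_model(validation_proj_files, validation_label_files,
                              test_proj, test_label,
                              save_path='/checkpoints/fold_%d/' % i,
                              log_dir='/logs/fold_%d/' % i,
                              i=i)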
Code example #3
def create_label(fn_proj, fn_vol, rec, geom):
    proj = dennerlein.read_noqueue(fn_proj)
    volume = rec.apply(proj, geom,
                       fullscan=True)  # CHANGE LATER TO TRUE!!!!!!
    return dennerlein.write(fn_vol, volume)
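
create_label only assembles the graph ops (a reconstruction followed by a Dennerlein write); it still has to be run in a session. A minimal sketch, assuming a reconstructor rec and geometry geom are configured elsewhere in the project; the file names are hypothetical.

# Hypothetical label generation for a single projection file.
with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
    write_op = create_label('/data/projections/patient_07.proj.bin',
                            '/data/labels/patient_07.vol.bin',
                            rec, geom)
    sess.run(write_op)
tf.reset_default_graph()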
Code example #4
def test_model(validation_proj, validation_label, test_proj, test_label,
               save_path, log_dir):
    step = 0
    losses = []

    # find checkpoint files
    checkpoints = []
    with open(save_path + 'checkpoint') as f:
        lines = f.readlines()
        pattern = re.compile(r'all_model_checkpoint_paths:\s"(.+)"')
        for line in lines:
            for match in re.finditer(pattern, line):
                checkpoints.append(match.groups()[0])

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # compute volume before training and export central slice
        print('Computing volume without trained parameters')
        vol_before = m.test_vol
        write_png = png.writeSlice(vol_before[CONF.proj_shape.N // 2],
                                   log_dir + 'slice_before_training.png')
        write_png_label = png.writeSlice(m.test_label[CONF.proj_shape.N // 2],
                                         log_dir + 'slice_label.png')
        vol_before_np, _, _, vol_label_np = sess.run(
            [vol_before, write_png, write_png_label, m.test_label])

        # find best checkpoint
        print('Searching best checkpoint')
        saver = tf.train.Saver()
        m.setTest(validation_proj, validation_label, sess)

        best_cp_i = 0
        best_cp_loss = math.inf

        for i, cp in enumerate(checkpoints):
            saver.restore(sess, cp)
            loss = sess.run(m.test_loss)

            if loss < best_cp_loss:
                best_cp_i = i
                best_cp_loss = loss
            print('.', end='', flush=True)
        print('')

        # load best model and set test volume
        print('Computing volume with trained parameters')
        m.setTest([test_proj], [test_label], sess)
        saver.restore(sess, checkpoints[best_cp_i])

        # compute volume after training and export central slice + dennerlein
        vol_after = m.test_vol
        write_png = png.writeSlice(vol_after[CONF.proj_shape.N // 2],
                                   log_dir + 'slice_after_training.png')
        write_dennerlein = dennerlein.write(log_dir + 'after_training.bin',
                                            vol_after)
        vol_after_np, _, _ = sess.run([vol_after, write_png, write_dennerlein])

        coord.request_stop()
        coord.join(threads)
        sess.close()

    tf.reset_default_graph()

    return losses, step
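
As a side note, the manual regex parse of the checkpoint file can also be done with TensorFlow 1.x's built-in helper; a short sketch, assuming save_path points at the same checkpoint directory used above.

# Equivalent checkpoint listing via the TF1 helper instead of hand-parsing the file.
ckpt_state = tf.train.get_checkpoint_state(save_path)
checkpoints = list(ckpt_state.all_model_checkpoint_paths) if ckpt_state else []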