Example #1
def minha_funcao_pessoal(validation_proj, validation_label, test_proj,
                         test_label, save_path, log_dir, i):
    print("Escrevendo o bang *************************************8")
    # Vamos tentar escrever o png do slice da reconstrução analítica

    # Simply creates the file in the folder.
    update_labels()

    print("\n\nPegando o proj")
    print(test_proj)
    print(test_label)

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:

        #print("\n\n\nTest label: ")
        #print(test_label)
        #print("\n\n\n")
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)
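        # NOTE: the same test projection/label pair is passed for both Model
        # input slots here, so only this single example is exercised.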

        #        print("\n\n\nDados das projeções: ")
        #        print(m.train_data)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
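        # The queue-runner threads started above feed the Model's TF1 input
        # pipeline.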

        print("\n\n printando o tensor ***************")
        oi = sess.run(m.test_label)

        np.save("mel_dels", oi)

        print(
            "\n\n\n***************** PRINTING THE PROJECTION BEFORE WRITING IT TO A FILE!"
        )
        #        print(label_np)
        #        print(label_np.shape)
        #        exit()

        #        for i in range(1):
        write_png = png.writeSlice(m.test_label[0],
                                   log_dir + 'slice_label_' + str(i) + '.png')
        sess.run(write_png)

        #write_png = png.writeSlice( m.train_data[0], log_dir + 'uma_projecao_do_train.png' )
        #sess.run(write_png)

        # NOTE: early return; everything below this point is unreachable.
        return

        # compute volume before training and export central slice
        print('Computing volume without trained parameters')
        vol_before = m.test_vol
        write_png = png.writeSlice(vol_before[CONF.proj_shape.N // 2],
                                   log_dir + 'slice_before_training.png')
        write_png_label = png.writeSlice(m.test_label[CONF.proj_shape.N // 2],
                                         log_dir + 'slice_label.png')
        vol_before_np, _, _, vol_label_np = sess.run(
            [vol_before, write_png, write_png_label, m.test_label])
Example #2
def minha_funcao_pessoal2():

    with tf.Session() as sess:

        train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph)

        sets = split_train_validation_set(0)
        m = Model(*sets, sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        """
        write_png = png.writeSlice( m.train_proj[0], '/media/davi/526E10CC6E10AAAD/mestrado_davi/train/test_model_0/uma_projecao_do_train.png' )
        sess.run(write_png)

        
        #volume_la = sess.run(volume_la)

        write_png = png.writeSlice( volume_la[0], '/media/davi/526E10CC6E10AAAD/mestrado_davi/train/test_model_0/uma_slice_do_train.png' )
        sess.run(write_png)

        write_png = png.writeSlice( m.test_label[0], '/media/davi/526E10CC6E10AAAD/mestrado_davi/train/test_model_0/test_label_0.png' )
        sess.run(write_png)

        """

        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
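        # NOTE: every op below (summaries, loss, train_step) is re-created on
        # each iteration of the loop, which keeps growing the graph and slows
        # every step; idiomatic TF1 code builds these ops once, up front.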
        for i in range(100):

            merge = tf.summary.merge_all()

            summary = sess.run(merge)

            volume_la = m.re.apply(m.train_proj, m.geom)

            t1 = tf.math.reduce_max(m.train_label)
            t2 = tf.math.reduce_max(volume_la)
            train_label_ = m.train_label * (254 / t1)
            volume_la_ = volume_la * (254 / t2)
            loss = tf.losses.mean_squared_error(train_label_, volume_la_)
            resultado_loss = sess.run(loss)
            print(resultado_loss)
            tf.summary.histogram("resultado_loss", resultado_loss)

            train_step = optimizer.minimize(loss)
            #train_step =  tf.contrib.layers.optimize_loss(loss, tf.train.get_global_step(), learning_rate=LEARNING_RATE, optimizer='Adam', summaries=["gradients"])
            #train_step = tf.contrib.slim.learning.create_train_op(loss, optimizer, summarize_gradients=True)

            sess.run(train_step)

            train_writer.add_summary(summary, i)

        log_dir = "/home/davi/Documentos/train/test_model_0/"
        write_png = png.writeSlice(m.test_vol[0],
                                   log_dir + 'slice_label_' + str(i) + '.png')
        sess.run(write_png)

    # NOTE: exit() makes everything below unreachable dead code.
    exit()

    save_path = '/home/davi/Documentos/train/model_%d/' % 0

    geom, angles = projtable.read(DATA_P + 'projMat.txt')
    reconstructor = ct.Reconstructor(CONF_LA,
                                     angles[0:15],
                                     DISPLACEMENT,
                                     trainable=True,
                                     name='LAReconstructor',
                                     weights_type=WEIGHTS_TYPE)

    volume_la = reconstructor.apply(train_proj, geom)
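
# A minimal build-once sketch (not part of the original example) of the
# training loop above: the loss, train step, and summaries are constructed a
# single time and only sess.run is called inside the loop. It assumes the same
# Model, split_train_validation_set, and LEARNING_RATE names used above.
def train_loop_build_once_sketch():
    with tf.Session() as sess:
        m = Model(*split_train_validation_set(0), sess)

        # Build the graph once, before the loop.
        volume_la = m.re.apply(m.train_proj, m.geom)
        t1 = tf.math.reduce_max(m.train_label)
        t2 = tf.math.reduce_max(volume_la)
        loss = tf.losses.mean_squared_error(m.train_label * (254 / t1),
                                            volume_la * (254 / t2))
        tf.summary.scalar("loss", loss)
        merged = tf.summary.merge_all()
        train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph)

        # The loop only executes the already-built ops.
        for i in range(100):
            _, loss_value, summary = sess.run([train_step, loss, merged])
            print(loss_value)
            train_writer.add_summary(summary, i)

        coord.request_stop()
        coord.join(threads)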
Example #3
def test_model(validation_proj, validation_label, test_proj, test_label,
               save_path, log_dir, i):
    step = 0
    losses = []

    print("********* Iteracao: " + str(i) + "\n\n")

    # find checkpoint files
    checkpoints = []
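    # 'checkpoint' is TensorFlow's checkpoint-state file (a text proto);
    # collect every all_model_checkpoint_paths entry listed in it.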
    with open(save_path + 'checkpoint') as f:
        lines = f.readlines()
        pattern = re.compile(r'all_model_checkpoint_paths:\s"(.+)"')
        for line in lines:
            for match in re.finditer(pattern, line):
                checkpoints.append(match.groups()[0])

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        #        write_png = png.writeSlice( m.test_label[0], log_dir + 'slice_label_'+str(i)+'.png' )
        #        sess.run(write_png)

        # compute volume before training and export central slice
        print('Computing volume ' + str(i) + ' without trained parameters')
        vol_before = m.test_vol
        #        write_png = png.writeSlice( vol_before[CONF.proj_shape.N // 2], log_dir + 'slice_before_training.png' )
        write_png = png.writeSlice(
            vol_before[31],
            log_dir + 'slice_before_training_' + str(i) + '.png')
        #        write_png_label = png.writeSlice( m.test_label[CONF.proj_shape.N // 2], log_dir + 'slice_label.png' )
        #        write_png_label = png.writeSlice( m.test_label[0], log_dir + 'slice_label.png' )

        #        vol_before_np, _, _, vol_label_np = sess.run( [vol_before, write_png, write_png_label, m.test_label] )
        vol_before_np, _ = sess.run([vol_before, write_png])

        # find best checkpoint
        #        print( 'Searching best checkpoint' )
        saver = tf.train.Saver()
        m.setTest(validation_proj, validation_label, sess)

        best_cp_i = 51  # NOTE: hard-coded; the search loop below is disabled
        best_cp_loss = math.inf

        #print("Hard Code for the best checkpoint 1102")

        print("\n\n**********************************")
        #print(checkpoints)
        print(checkpoints[best_cp_i])
        """
        for i, cp in enumerate( checkpoints ):
#        for i in range(300, 400):
            saver.restore( sess, checkpoints[i] )
            loss = sess.run( m.test_loss )
            print("Modelo: ")
            print(i)
            print("Loss: ")
            print(loss)

            if loss < best_cp_loss:
                best_cp_i = i
                best_cp_loss = loss
            print( '.', end = '', flush = True )
        """

        print('')
        print("Finished testing the models ********\n\n")

        print("Best checkpoint index: ")
        print(best_cp_i)
        saver.restore(sess, checkpoints[best_cp_i])
        #print("Loss of model " + str(best_cp_i))
        #loss = sess.run( m.test_loss )
        #print(loss)

        # load best model and set test volume
        print('Computing volume ' + str(i) + ' with trained parameters')
        m.setTest([test_proj], [test_label], sess)
        saver.restore(sess, checkpoints[best_cp_i])

        # compute volume after training and export central slice + dennerlein
        vol_after = m.test_vol
        #write_png = png.writeSlice( vol_after[CONF.proj_shape.N // 2], log_dir + 'slice_after_training.png' )
        write_png = png.writeSlice(
            vol_after[31], log_dir + 'slice_after_training_' + str(i) + '.png')
        write_dennerlein = dennerlein.write(
            log_dir + 'after_training_' + str(i) + '.bin', vol_after)
        vol_after_np, _, _ = sess.run([vol_after, write_png, write_dennerlein])

        coord.request_stop()
        coord.join(threads)
        sess.close()

    tf.reset_default_graph()

    return losses, step
Example #4
def train_model(offset, save_path, resume):
    global LEARNING_RATE
    losses = []
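    # Stopping criterion: stop once the median of the older half of the
    # tracked losses is below the median of the newer half, i.e. the loss has
    # stopped improving.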
    stop_crit = lambda l: np.median(l[:len(l) // 2]) < np.median(l[len(l) // 2:])

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        #sets = split_train_validation_set( offset )
        # The first element is one phantom used for validation.
        sets = my_split_train_validation_set()

        m = Model(*sets, sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        c = lambda l: stop_crit(l) if len(l) >= TRACK_LOSS else False

        saver = tf.train.Saver(max_to_keep=TRACK_LOSS)

        if resume:
            cp = tf.train.latest_checkpoint(save_path)
            if cp:
                print('Restoring session')
                saver.restore(sess, cp)

        write_png = png.writeSlice(
            m.test_vol[0],
            '/home/davi/Documentos/train/test_model_0/test_label_antes_treinamento.png'
        )
        sess.run(write_png)

        train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph)
        count_all = 0
        try:
            while 1:  #not coord.should_stop() and not c( losses ):
                for i in range(TEST_EVERY):
                    count_all += 1
                    sess.run(m.train_op[0])
                    sess.run(m.train_op[1])

                lv, step = sess.run([m.test_loss, tf.train.get_global_step()])
                sess.run(m.test_loss_summary)

                #print("Step: " + str(step))
                #print("Loss: " + str(lv))
                print('Step %d; Loss: %f' % (step, lv), flush=True)
                print(lv, flush=True)

                #tf.summary.histogram("loss_validacao", lv)

                merge = tf.summary.merge_all()
                summary = sess.run(merge)
                train_writer.add_summary(summary, count_all)
                #                if step == 17:
                #                    print("\nLowering the LEARNING_RATE\n")
                #                    LEARNING_RATE /= 10

                #write_png = png.writeSlice( m.test_vol[0], '/media/davi/526E10CC6E10AAAD/mestrado_davi/train/test_model_0/test_vol__.png' )
                #sess.run(write_png)

                losses.append(lv)
                if len(losses) > TRACK_LOSS:
                    del losses[0]

                saver.save(sess, save_path + 'model', global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done.')
        finally:

            write_png = png.writeSlice(
                m.test_vol[0],
                '/home/davi/Documentos/train/test_model_0/test_label_depois_treinamento.png'
            )
            sess.run(write_png)

            coord.request_stop()

        coord.join(threads)
        sess.close()

    tf.reset_default_graph()

    return losses, step
Example #5
def my_train():

    EPOCHS = 10  # NOTE: overwritten below
    BATCH_SIZE = 16  # NOTE: overwritten below

    train_proj = [
        "/home/davi/Documentos/ConeDeepLearningCT2/phantoms/lowdose/binary0.proj.bin",
    ]

    train_label = [
        "/home/davi/Documentos/ConeDeepLearningCT2/phantoms/lowdose/binary0.vol.bin",
    ]

    save_path = '/home/davi/Documentos/train/model_%d/' % 0
    with tf.Session() as sess:

        train_list = []
        label_list = []
        for i in range(len(train_proj)):

            train_list_ = sess.run(dennerlein.read_noqueue(train_proj[i]))
            train_list.append(train_list_)

            label_list_ = sess.run(dennerlein.read_noqueue(train_label[i]))
            label_list.append(label_list_)

    geom, angles = projtable.read(DATA_P + 'projMat.txt')
    # At this point the samples are in train_list and the labels in label_list.

    BATCH_SIZE = 1
    features, labels = (train_list, label_list)
    dataset = tf.data.Dataset.from_tensor_slices(
        (np.asarray(features), np.asarray(labels))).repeat().batch(BATCH_SIZE)
    iterator = dataset.make_one_shot_iterator()  # renamed from `iter` to avoid shadowing the builtin
    x, y = iterator.get_next()
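    # x and y are the next (projection, label) batch tensors; running any op
    # that depends on them advances the one-shot iterator.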

    EPOCHS = 100000
    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:

        global LEARNING_RATE
        # Set things up to write out a slice before training starts.
        re = ct.Reconstructor(CONF_LA,
                              angles[0:LIMITED_ANGLE_SIZE],
                              DISPLACEMENT,
                              name='LAReconstructor',
                              weights_type=WEIGHTS_TYPE)

        volume_la = re.apply(x, geom)
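        # This first, non-trainable reconstruction is presumably used only for
        # the pre-training snapshot written below.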

        # This reconstructor will be used to correct the image over the
        # training epochs.
        re = ct.Reconstructor(CONF_LA,
                              angles[0:LIMITED_ANGLE_SIZE],
                              DISPLACEMENT,
                              trainable=True,
                              name='LAReconstructor',
                              weights_type=WEIGHTS_TYPE)

        if not tf.train.get_global_step():
            tf.train.create_global_step()

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        saver = tf.train.Saver(max_to_keep=TRACK_LOSS)

        # Write the pre-training slice.
        write_png = png.writeSlice(
            volume_la[0],
            '/home/davi/Documentos/train/test_model_0/slice_qualquer_antes_treinamento.png'
        )
        sess.run(write_png)

        try:
            for i in range(EPOCHS):

                #				if i % 30 == 0:
                #					print("Lowering the LEARNING_RATE")
                #					LEARNING_RATE /= 10

                proj = train_list[0]  # NOTE: unused
                label = label_list[0]  # NOTE: unused

                train_step = tf.no_op()

                volume_la = re.apply(x, geom)
                volume_la = tf.expand_dims(volume_la, axis=0)

                t1 = tf.math.reduce_max(y)
                t2 = tf.math.reduce_max(volume_la)
                label_ = y * (255 / t1)
                volume_la_ = volume_la * (255 / t2)

                # Write the current corrected slice (the double indexing drops
                # the leading batch dims).
                write_png = png.writeSlice(
                    volume_la_[0][0],
                    '/home/davi/Documentos/train/test_model_0/slice_qualquer_depois_treinamento.png'
                )
                sess.run(write_png)

                loss = tf.losses.mean_squared_error(label_, volume_la_)
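                # NOTE: the optimizer op below is re-created on every epoch,
                # growing the graph each iteration; building it once before
                # the loop would be the idiomatic TF1 pattern.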

                with tf.control_dependencies([train_step]):
                    gstep = tf.train.get_global_step()

                    train_step = tf.train.GradientDescentOptimizer(
                        LEARNING_RATE).minimize(
                            loss,
                            colocate_gradients_with_ops=True,
                            global_step=gstep)

                step = sess.run(tf.train.get_global_step())
                if step % 10 == 0:  # save every 10 steps
                    print("Saving the model")
                    saver.save(sess, save_path + 'model', global_step=step)

                # Train step.
                print("Training")
                sess.run(train_step)

        except tf.errors.OutOfRangeError:
            print('Done.')
        finally:

            coord.request_stop()
Example #6
def test_model(validation_proj, validation_label, test_proj, test_label,
               save_path, log_dir):
    step = 0
    losses = []

    # find checkpoint files
    checkpoints = []
    with open(save_path + 'checkpoint') as f:
        lines = f.readlines()
        pattern = re.compile(r'all_model_checkpoint_paths:\s"(.+)"')
        for line in lines:
            for match in re.finditer(pattern, line):
                checkpoints.append(match.groups()[0])

    with tf.Session(config=tf.ConfigProto(gpu_options=GPU_OPTIONS)) as sess:
        m = Model([test_proj], [test_label], [test_proj], [test_label], sess)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # compute volume before training and export central slice
        print('Computing volume without trained parameters')
        vol_before = m.test_vol
        write_png = png.writeSlice(vol_before[CONF.proj_shape.N // 2],
                                   log_dir + 'slice_before_training.png')
        write_png_label = png.writeSlice(m.test_label[CONF.proj_shape.N // 2],
                                         log_dir + 'slice_label.png')
        vol_before_np, _, _, vol_label_np = sess.run(
            [vol_before, write_png, write_png_label, m.test_label])

        # find best checkpoint
        print('Searching best checkpoint')
        saver = tf.train.Saver()
        m.setTest(validation_proj, validation_label, sess)

        best_cp_i = 0
        best_cp_loss = math.inf

        for i, cp in enumerate(checkpoints):
            saver.restore(sess, cp)
            loss = sess.run(m.test_loss)

            if loss < best_cp_loss:
                best_cp_i = i
                best_cp_loss = loss
            print('.', end='', flush=True)
        print('')

        # load best model and set test volume
        print('Computing volume with trained parameters')
        m.setTest([test_proj], [test_label], sess)
        saver.restore(sess, checkpoints[best_cp_i])

        # compute volume after training and export central slice + dennerlein
        vol_after = m.test_vol
        write_png = png.writeSlice(vol_after[CONF.proj_shape.N // 2],
                                   log_dir + 'slice_after_training.png')
        write_dennerlein = dennerlein.write(log_dir + 'after_training.bin',
                                            vol_after)
        vol_after_np, _, _ = sess.run([vol_after, write_png, write_dennerlein])

        coord.request_stop()
        coord.join(threads)
        sess.close()

    tf.reset_default_graph()

    return losses, step