def tower_loss(xyzs, feats, labels, is_training, reuse=False):
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
        xyzs, dxyzs, feats, labels, vlens, vbegs, vcens = \
            points_pooling_two_layers(xyzs, feats, labels, voxel_size1=0.15,
                                      voxel_size2=0.45, block_size=3.0)
        global_feats, local_feats = pointnet_10_concat_pre(
            xyzs, dxyzs, feats, vlens, vbegs, vcens, reuse)

        feats = tf.split(global_feats, feats_dims, axis=1)
        # reordered feature groups, used only by the 'sort_ablation' branch below
        feats_sort = [
            feats[0], feats[2], feats[1], feats[4], feats[3], feats[6],
            feats[5]
        ]
        print(global_feats.shape)
        if FLAGS.compare_type == 'pop':
            feats.pop(FLAGS.pop_id)
            # concatenate the remaining groups after dropping feats[pop_id]
            global_feats = tf.concat(feats, axis=1)
        elif FLAGS.compare_type == 'stage':
            global_feats = tf.concat([feats[1], feats[3], feats[5]], axis=1)
        elif FLAGS.compare_type == 'fc':
            global_feats = tf.concat([feats[0], feats[2], feats[4], feats[6]],
                                     axis=1)
        elif FLAGS.compare_type == 'sort_ablation':
            global_feats = tf.concat(feats_sort[FLAGS.pop_id:], axis=1)
        else:
            global_feats = tf.concat(feats[FLAGS.pop_id:], axis=1)

        print(global_feats.shape)
        global_feats = tf.expand_dims(global_feats, axis=0)
        logits = classifier_v2(global_feats,
                               is_training,
                               FLAGS.num_classes,
                               reuse,
                               use_bn=False)  # [1,pn,num_classes]

        flatten_logits = tf.reshape(
            logits, [-1, FLAGS.num_classes])  # [pn,num_classes]

        # loss
        labels_flatten = tf.reshape(labels, [-1, 1])  # [pn,1]
        labels_flatten = tf.squeeze(labels_flatten, axis=1)  # [pn]
        train_weights_tf = tf.Variable(train_weights,
                                       trainable=False,
                                       name='train_weights')
        # per-point loss weights looked up from the per-class weight table
        weights = tf.gather(train_weights_tf, labels_flatten)
        loss = tf.losses.sparse_softmax_cross_entropy(labels_flatten,
                                                      flatten_logits,
                                                      weights=weights)

    tf.summary.scalar(loss.op.name, loss)

    return loss, flatten_logits, labels
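
# Minimal driver sketch for tower_loss (an assumption-based example, not the
# original training script): it presumes FLAGS, train_weights and the imported
# network helpers are already defined at module level, and the placeholder
# shapes and learning rate below are guesses.
pls_xyzs = tf.placeholder(tf.float32, [None, 3], name='pls_xyzs')    # point coordinates
pls_feats = tf.placeholder(tf.float32, [None, 6], name='pls_feats')  # per-point features (assumed width)
pls_labels = tf.placeholder(tf.int64, [None], name='pls_labels')     # per-point class ids

loss, flat_logits, out_labels = tower_loss(pls_xyzs, pls_feats, pls_labels,
                                           is_training=True, reuse=False)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)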
Example #2
def pgnet_nse_whole(xyzs, feats, labels, reuse, is_training, num_classes):

    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
        xyzs, dxyzs, feats, labels, vlens, vbegs, vcens = \
            points_pooling_three_layers(xyzs, feats, labels, voxel_size1=0.1,
                                        voxel_size2=0.2, voxel_size3=0.4,
                                        block_size=3.0)
        graph_feats = pgnet_nse(xyzs,
                                feats,
                                vlens,
                                vbegs,
                                vcens,
                                radius=[0.1, 0.2, 0.4, 0.8],
                                reuse=reuse)
        graph_feats = tf.expand_dims(graph_feats, axis=1)

        logits = classifier_v2(graph_feats,
                               is_training,
                               num_classes,
                               reuse,
                               use_bn=False)  # [1,pn,num_classes]

    return logits, xyzs, labels
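
# Inference sketch for pgnet_nse_whole (placeholder shapes and num_classes=13
# are illustrative assumptions, not values taken from the original repo).
pts = tf.placeholder(tf.float32, [None, 3])
fts = tf.placeholder(tf.float32, [None, 6])
lbs = tf.placeholder(tf.int64, [None])
logits, out_xyzs, out_lbs = pgnet_nse_whole(pts, fts, lbs, reuse=False,
                                            is_training=False, num_classes=13)
flat_logits = tf.reshape(logits, [-1, 13])   # [pn, num_classes]
preds = tf.argmax(flat_logits, axis=1)       # predicted class id per point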
Example #3
        crop_fig = fig_data[(h - 16):(h + 16), (w - 16):(w + 16), :]
        crop_fig = transform.resize(crop_fig, [64, 64, 3], mode='reflect')

        io.imsave(tmp_path + '{0}_{1}.jpg'.format(label_count, count_a),
                  crop_fig)
        count_a += 1
        point_list.append(i)

#=================== restore conv network ============

check_point_list = point_list
check_num = len(check_point_list)
check_right_list = []  # record which candidates are confirmed positive

x_node = tf.placeholder(tf.float32, [1, 64, 64, 3])
y_ori_predict = classifier_v2(x_node, name='v2')
#y_down1_predict = classifier_down1(x_node, name='v2_down1')
y_predict = y_ori_predict  #+ y_down1_predict

sess = tf.Session()
sess.run(tf.global_variables_initializer())
#============= sess saver restore ==========
saver = tf.train.Saver()
saver.restore(sess, ckpt_path)
fig_right_name_list = []
for i in tqdm(range(check_num)):  # first time
    fig_train = io.imread(tmp_path + label_count + '_{}.jpg'.format(i))
    mean_fig = np.mean(fig_train)
    std_fig = np.std(fig_train)
    fig_train = (fig_train - mean_fig) / std_fig
    #    if fig_train.shape[0] != 64 or fig_train.shape[1] != 64:
anotate_path = './result/anotate_figs/'
process_dir = './data/process/'
#label_count = 'aq_22'

if not os.path.exists(anotate_path):
    os.makedirs(anotate_path)
if not os.path.exists(check_path):
    os.makedirs(check_path)
if not os.path.exists(tmp_path):
    os.makedirs(tmp_path)
if not os.path.exists(process_dir):
    os.makedirs(process_dir)    
    
    
x_node = tf.placeholder(tf.float32, [1, 64, 64, 3])
y_predict = classifier_v2(x_node)    

sess_build = tf.Session()
sess_build.run(tf.global_variables_initializer())
#============= sess saver restore ==========
saver = tf.train.Saver()
saver.restore(sess_build, ckpt_path)   
    
tmp_figs_path = glob.glob(tmp_path+'/*.jpg')

for i in tqdm(range(len(tmp_figs_path))):
    base_name = os.path.basename(tmp_figs_path[i])
    fig_i = imageio.imread(tmp_figs_path[i])
    #preprocess
    mean_i = np.mean(fig_i)
    std_i = np.std(fig_i)
Example #5
        test_label_data.append(i)
        
data_num = len(fig_data)        
shuffle = list(range(data_num))
np.random.shuffle(shuffle)

batch_idx = data_num // batch_size
#===============build model==============
x_ori_node = tf.placeholder(tf.float32, [batch_size, 64, 64, 3])

y_node = tf.placeholder(tf.int32, [batch_size])

x_check_node = tf.placeholder(tf.float32, [1, 64, 64, 3])

#y_predict = classifier_v2(x_node) # ============new method====
y_ori_predict = classifier_v2(x_ori_node, reuse=False, name='v2')
#y_down1_predict = classifier_down1(x_ori_node, reuse=False, name='v2_down1')
y_predict = y_ori_predict

y_ori_check = classifier_v2(x_check_node, reuse=True, name='v2')  # check accuracy every epoch
#y_down1_check = classifier_down1(x_check_node, reuse=True, name='v2_down1')
y_check = y_ori_check
y_check_list = []

loss_ori = tf.losses.sparse_softmax_cross_entropy(labels=y_node, logits=y_ori_predict)


var_list = tf.trainable_variables()
var_ori_list = [i for i in var_list if 'v2' in i.name]
opt_ori = tf.train.AdamOptimizer(0.0001)
opt_ori = opt_ori.minimize(loss_ori, var_list=var_ori_list)
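
# Hypothetical training loop for the graph built above (a sketch, not the
# original script): `label_data` is an assumed name for the per-image label
# array, the epoch count is arbitrary, and the script's existing numpy/
# tensorflow imports are assumed to be in scope.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for epoch in range(100):                      # assumed number of epochs
    np.random.shuffle(shuffle)
    for b in range(batch_idx):
        idx = shuffle[b * batch_size:(b + 1) * batch_size]
        batch_x = np.stack([fig_data[j] for j in idx]).astype(np.float32)
        # per-image standardization, matching the preprocessing used elsewhere
        batch_x = (batch_x - batch_x.mean(axis=(1, 2, 3), keepdims=True)) \
                  / batch_x.std(axis=(1, 2, 3), keepdims=True)
        batch_y = np.array([label_data[j] for j in idx], dtype=np.int32)
        _, batch_loss = sess.run([opt_ori, loss_ori],
                                 feed_dict={x_ori_node: batch_x, y_node: batch_y})
    print('epoch %d, last batch loss %.4f' % (epoch, batch_loss))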
from skimage import transform, measure, color, exposure, io, morphology
import glob
import numpy as np
import tensorflow as tf
from model import class_ori, classifier_v2
import os

confirm_dir = './data/enhance/*.jpg'
result_dir = './data/confirm/'
ckpt_path = './checkpoints/cell_100.ckpt'
fig_list = glob.glob(confirm_dir)

if not os.path.exists(result_dir):
    os.makedirs(result_dir)

x = tf.placeholder(tf.float32, [1, 64, 64, 3])
y = classifier_v2(x, name='v2')

sess = tf.Session()
saver = tf.train.Saver()
init = tf.global_variables_initializer()

sess.run(init)
saver.restore(sess, ckpt_path)

for i in fig_list:
    fig = io.imread(i)
    fig_name = os.path.basename(i)
    mean = np.mean(fig)
    std = np.std(fig)
    fig_pro = (fig - mean) / std
    fig_pro = np.reshape(fig_pro, [1, 64, 64, 3])