Example 1
# Third-party imports; the project-local modules used below (config, net,
# data_preprocessing, data_scraping, utils, default_accuracy, multi_accuracy)
# are assumed to be importable as in the original repository.
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf


def eval_smooth_show(config_obj):
    test_preprocess_obj=data_preprocessing.test_preprocess.test_preprocess(config_obj.tfrecord_test_addr, config_obj.class_num)
    net_name=config_obj.net_type
    test_net_obj=None
    if net_name=='vgg16':
        test_net_obj=net.vgg16.vgg16(False, 'vgg16', config_obj.class_num)
    elif net_name=='mobilenet_v2':
        test_net_obj=net.mobilenet_v2.mobilenet_v2(True, 'mobilenet_v2', config_obj.class_num)

    accu_name=config_obj.accuracy_type
    accu_obj=None
    if accu_name=='default':
        accu_obj=default_accuracy.default_accuracy()
    elif accu_name=='multi':
        accu_obj=multi_accuracy.multi_accuracy()

    images_test, labels_test = test_preprocess_obj.def_preposess()
    net_test = test_net_obj.def_net(images_test)
    inputs=tf.sigmoid(net_test)
    predict=tf.cast(inputs> 0.5, tf.float32)
    accuracy_perfect, accuracy, precision, recall, acc_list, pre_list, rec_list = accu_obj.def_accuracy(net_test, labels_test)

    saver = tf.train.Saver()
    img_mean = utils.global_var.means
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver.restore(sess, config_obj.result_addr)

        accuracy_perfect_v, accuracy_v, precision_v, recall_v, acc_list_v, pre_list_v, rec_list_v, \
            predict_v, labels_test_v, images_test_v = sess.run(
                [accuracy_perfect, accuracy, precision, recall, acc_list, pre_list, rec_list,
                 predict, labels_test, images_test])
        print('accuracy_perfect_v: %s' % accuracy_perfect_v)
        print('accuracy_v: ', accuracy_v)
        print('precision:' , precision_v)
        print('recall:' , recall_v)
        print('acc_list_v:' , acc_list_v)
        print('pre_list_v:' , pre_list_v)
        print('rec_list_v:' , rec_list_v)
        print('len(predict_v):', len(predict_v))
        for k in range(len(predict_v)):
            #print(labels_test_v[k])
            mat_show=[]
            for i in range(len(predict_v[k])):
                if predict_v[k][i] == 1:
                    mat_show.append(data_scraping.materil_name.material_list[i])
                    print(str(data_scraping.materil_name.material_list[i]))
            print(predict_v[k])
            show_img=images_test_v[k]
            show_img=show_img+img_mean
            show_img = abs(show_img) / 256.0
            plt.imshow(show_img)
            zhfont = matplotlib.font_manager.FontProperties(fname='/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc')
            for i in range(len(mat_show)):
                plt.text(150, 25*(i+1), str(mat_show[i]), fontproperties = zhfont, fontsize=15, color='red')
            plt.show()

        coord.request_stop()
        coord.join(threads)
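
The seven values unpacked from accu_obj.def_accuracy above come from a project module that is not shown here. As a point of reference, the NumPy sketch below computes one plausible set of such multi-label metrics (exact-match accuracy, element-wise accuracy, micro precision/recall, and per-class lists) from thresholded sigmoid outputs; the function name, return order, and 0.5 threshold are assumptions, not the project's actual multi_accuracy code.

import numpy as np

def multi_label_metrics_sketch(logits, labels, threshold=0.5):
    probs = 1.0 / (1.0 + np.exp(-logits))             # sigmoid, as in the snippet above
    pred = (probs > threshold).astype(np.float32)     # per-class 0/1 prediction
    accuracy_perfect = np.mean(np.all(pred == labels, axis=1))  # exact-match accuracy
    accuracy = np.mean(pred == labels)                          # element-wise accuracy
    tp = np.sum(pred * labels, axis=0)                          # per-class true positives
    acc_list = np.mean(pred == labels, axis=0)                  # per-class accuracy
    pre_list = tp / np.maximum(pred.sum(axis=0), 1e-8)          # per-class precision
    rec_list = tp / np.maximum(labels.sum(axis=0), 1e-8)        # per-class recall
    precision = tp.sum() / max(pred.sum(), 1e-8)                # micro precision
    recall = tp.sum() / max(labels.sum(), 1e-8)                 # micro recall
    return accuracy_perfect, accuracy, precision, recall, acc_list, pre_list, rec_list

logits = np.array([[2.0, -1.0, 0.5], [-0.5, 1.5, -2.0]])
labels = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
print(multi_label_metrics_sketch(logits, labels))
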
Example 2

# Fragment: net_obj, x_data, y_data, learning_rate, is_training, and sess_path
# are defined earlier in the original file and are not shown here.
y_temp = net_obj.def_net(x_data)

# y = tf.nn.sigmoid(net_obj.def_net(x_data))

y = tf.nn.sigmoid(y_temp)

# Compare the predicted y with the ground-truth y_data to obtain the loss.
# sigmoid_cross_entropy_with_logits applies the sigmoid itself, so it is fed
# the raw logits y_temp rather than the already-sigmoided y.
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=y_temp, labels=y_data))
# Define the optimization algorithm used for training
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(loss)
acc = tf.reduce_mean(tf.cast(tf.abs(y - y_data) < 0.1, tf.float32))

obj_acc = multi_accuracy.multi_accuracy()
acc_total, accu, precision, recall, acc_list, precision_list, recall_list = obj_acc.def_accuracy(
    y_temp, y_data)
obj_de_acc = default_accuracy.default_accuracy()
acc_default = obj_de_acc.def_accuracy(y_temp, y_data)
saver = tf.train.Saver()

with tf.Session() as sess:
    file_writer = tf.summary.FileWriter('./logs_simple', sess.graph)
    if not is_training:
        saver.restore(sess, sess_path)
    else:
        # Initialize the variables
        init = tf.global_variables_initializer()
        sess.run(init)
    # Train indefinitely; every 1,000 steps, print the current training results
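    # A hedged sketch of the loop the comment above describes; it assumes that
    # x_data and y_data can be evaluated without a feed_dict (e.g. queue-fed)
    # and that a checkpoint should be saved alongside each printout.
    step = 0
    while True:
        _, loss_v, acc_v = sess.run([train, loss, acc])
        step += 1
        if step % 1000 == 0:
            print('step %d, loss %.4f, acc %.4f' % (step, loss_v, acc_v))
            saver.save(sess, sess_path)
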
Example 3
# Third-party imports; the project-local modules used below are assumed to be
# importable as in the original repository.
import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf


def eval_smooth(config_name, repeat_time):
    print('choose config: ' + config_name)
    config_obj = None
    if config_name == 'chamo':
        config_obj = config.chamo.get_config()
    elif config_name == 'chamo_full_run':
        config_obj = config.chamo_full_run.get_config()

    test_preprocess_obj = data_preprocessing.test_preprocess.test_preprocess(config_obj.tfrecord_test_addr,
                                                                             config_obj.class_num)
    net_name = config_obj.net_type
    test_net_obj = None
    if net_name == 'vgg16':
        test_net_obj = net.vgg16.vgg16(False, 'vgg16', config_obj.class_num)
    elif net_name == 'mobilenet_v2':
        test_net_obj = net.mobilenet_v2.mobilenet_v2(True, 'mobilenet_v2', config_obj.class_num)

    accu_name = config_obj.accuracy_type
    accu_obj = None
    if accu_name == 'default':
        accu_obj = default_accuracy.default_accuracy()
    elif accu_name == 'multi':
        accu_obj = multi_accuracy.multi_accuracy()

    images_test, labels_test = test_preprocess_obj.def_preposess()
    net_test = test_net_obj.def_net(images_test)
    inputs = tf.sigmoid(net_test)
    predict = tf.cast(inputs > 0.5, tf.float32)
    accuracy_perfect, accuracy, precision, recall, f1, acc_list, \
        pre_list, pre_list_nume, pre_list_deno, rec_list, \
        rec_list_nume, rec_list_deno = accu_obj.def_accuracy(net_test, labels_test)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver.restore(sess, config_obj.result_addr)

        # len_all = len(labels_test)
        each_size = labels_test.get_shape().as_list()[0]
        len_all = labels_test.get_shape().as_list()[1]

        acc_perfect_all = 0.0
        acc_all = 0.0
        precision_all = 0.0
        recall_all = 0.0

        acc_list_all = np.zeros(shape=[len_all], dtype=np.float32)
        precision_all_nume = np.zeros(shape=[len_all], dtype=np.float32)
        precision_all_deno = np.zeros(shape=[len_all], dtype=np.float32)
        precision_list_all = np.zeros(shape=[len_all], dtype=np.float32)
        recall_all_nume = np.zeros(shape=[len_all], dtype=np.float32)
        recall_all_deno = np.zeros(shape=[len_all], dtype=np.float32)
        recall_list_all = np.zeros(shape=[len_all], dtype=np.float32)

        for repeat_i in range(1, repeat_time+1):
            accuracy_perfect_v, accuracy_v, precision_v, recall_v, acc_list_v, pre_list_nume_v, pre_list_deno_v, \
            rec_list_nume_v, rec_list_deno_v, predict_v, labels_test_v, images_test_v = sess.run(
                [accuracy_perfect, accuracy, precision, recall, acc_list,
                 pre_list_nume, pre_list_deno, rec_list_nume, rec_list_deno, predict, labels_test,
                 images_test])

            acc_perfect_all = acc_perfect_all + accuracy_perfect_v
            acc_all = acc_all + accuracy_v
            precision_all = precision_all + precision_v
            recall_all = recall_all + recall_v

            acc_list_all = np.nan_to_num(acc_list_all) + acc_list_v
            precision_all_nume = precision_all_nume + pre_list_nume_v
            precision_all_deno = precision_all_deno + pre_list_deno_v
            recall_all_nume = recall_all_nume + rec_list_nume_v
            recall_all_deno = recall_all_deno + rec_list_deno_v

            repeat_i = float(repeat_i)
            print('step: %d total pictures: %d' % (repeat_i, each_size*repeat_i))
            print('accuracy_perfect_v:', acc_perfect_all/repeat_i)
            print('accuracy_v: ', acc_all/repeat_i)
            print('precision:', precision_all/repeat_i)
            print('recall:', recall_all/repeat_i)
            print('acc_list_v:', acc_list_all/repeat_i)
            print('pre_list_v:', precision_all_nume/precision_all_deno)
            print('rec_list_v:', recall_all_nume/recall_all_deno)

        coord.request_stop()
        coord.join(threads)
    repeat_time = float(repeat_time)
    acc_perfect_all = acc_perfect_all/repeat_time
    acc_all = acc_all/repeat_time
    precision_all = precision_all/repeat_time
    recall_all = recall_all/repeat_time
    acc_list_all = acc_list_all/repeat_time
    precision_list_all = precision_all_nume/precision_all_deno
    recall_list_all = recall_all_nume/recall_all_deno
    return acc_perfect_all, acc_all, precision_all, recall_all, acc_list_all, precision_list_all, recall_list_all
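
For reference, a minimal sketch of how eval_smooth might be invoked; the config name 'chamo' and the repeat count come from the branches above, but the call site itself is an assumption, not code from the project.

if __name__ == '__main__':
    (acc_perfect, acc, precision, recall,
     acc_list, precision_list, recall_list) = eval_smooth('chamo', repeat_time=10)
    print('averaged exact-match accuracy: %.4f' % acc_perfect)
    print('averaged micro precision / recall: %.4f / %.4f' % (precision, recall))
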
def eval_smooth_show(config_obj, one_pic_path=None, isDeconv=False):
    test_preprocess_obj = data_preprocessing.test_preprocess.test_preprocess(
        config_obj.tfrecord_test_addr, config_obj.class_num)
    net_name = config_obj.net_type
    test_net_obj = None
    if net_name == 'vgg16':
        test_net_obj = net.vgg16.vgg16(False, 'vgg16', config_obj.class_num)
    elif net_name == 'mobilenet_v2':
        test_net_obj = net.mobilenet_v2.mobilenet_v2(False, 'mobilenet_v2',
                                                     config_obj.class_num)

    accu_name = config_obj.accuracy_type
    accu_obj = None
    if accu_name == 'default':
        accu_obj = default_accuracy.default_accuracy()
    elif accu_name == 'multi':
        accu_obj = multi_accuracy.multi_accuracy()

    images_test, labels_test = test_preprocess_obj.def_preposess(batch_size=1)
    net_test = test_net_obj.def_net(images_test)
    inputs = tf.sigmoid(net_test)
    predict = tf.cast(inputs > 0.1, tf.float32)
    accuracy_TOTAL = accu_obj.def_accuracy(net_test, labels_test)

    saver = tf.train.Saver()
    img_mean = utils.global_var.means
    sess = tf.Session()

    writer = tf.summary.FileWriter("Log/", tf.get_default_graph())
    with sess.as_default():
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver.restore(sess, config_obj.result_addr)

        [accu_TOTAL, predict_v, labels_test_v, images_test_v] \
            = sess.run([accuracy_TOTAL, predict, labels_test, images_test])
        print('len(predict_v):', len(predict_v))
        for k in range(len(predict_v)):
            #print(labels_test_v[k])
            mat_show = []
            for i in range(len(predict_v[k])):
                if predict_v[k][i] == 1:
                    mat_show.append(
                        data_scraping.materil_name_73.material_list[i])
                    print(str(data_scraping.materil_name_73.material_list[i]))
            print(predict_v[k])
            show_img = images_test_v[k]
            show_img = show_img + img_mean
            show_img = abs(show_img) / 256.0
            plt.imshow(show_img)
            # zhfont = matplotlib.font_manager.FontProperties(
            # fname='/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc')
            for i in range(len(mat_show)):
                plt.text(150,
                         25 * (i + 1),
                         str(mat_show[i]),
                         fontproperties='Simhei',
                         fontsize=15,
                         color='red')
            plt.show()

        coord.request_stop()
        coord.join(threads)
    writer.close()
    if isDeconv:
        #show_cnnvis(sess, feed_dict={X: image_list, Y: tempY}, input_tensor=images_test)
        show_cnnvis(sess, feed_dict={}, input_tensor=images_test)
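
show_cnnvis is not defined in this snippet. Given the isDeconv flag, it presumably wraps a deconvolution-based visualization such as the tf_cnnvis package; the sketch below is only a guess at that wrapper, with the layer selection and output paths as assumptions.

def show_cnnvis_sketch(sess, feed_dict, input_tensor):
    # Hypothetical wrapper around tf_cnnvis's deconvolution visualization.
    from tf_cnnvis import deconv_visualization
    deconv_visualization(sess_graph_path=sess,
                         value_feed_dict=feed_dict,
                         input_tensor=input_tensor,
                         layers=['r', 'p', 'c'],   # relu, pooling, conv layers
                         path_logdir='Log/',
                         path_outdir='Output/')
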
def eval_reconstruct_slim(config_obj, one_pic_path):
    '''
    Re-implements slim's image preprocessing for a single picture.
    :param config_obj:
    :param one_pic_path:
    :return:
    '''
    net_name = config_obj.net_type
    test_net_obj = None
    if net_name == 'vgg16':
        test_net_obj = net.vgg16.vgg16(False, 'vgg16', config_obj.class_num)
    elif net_name == 'mobilenet_v2':
        test_net_obj = net.mobilenet_v2.mobilenet_v2(False, 'mobilenet_v2',
                                                     config_obj.class_num)

    accu_name = config_obj.accuracy_type
    accu_obj = None
    if accu_name == 'default':
        accu_obj = default_accuracy.default_accuracy()
    elif accu_name == 'multi':
        accu_obj = multi_accuracy.multi_accuracy()

    images_test_p = tf.placeholder(shape=[config_obj.batchsize, 224, 224, 3],
                                   dtype=tf.float32)
    labels_test_p = tf.placeholder(
        shape=[config_obj.batchsize, config_obj.class_num], dtype=tf.float32)
    images_test, labels_test = read_a_pic_reconstruct_slim(
        one_pic_path, config_obj.class_num)
    net_test = test_net_obj.def_net(images_test_p)
    inputs = tf.sigmoid(net_test)
    predict = tf.cast(inputs > 0.1, tf.float32)
    accuracy_TOTAL = accu_obj.def_accuracy(net_test, labels_test_p, 0.5)

    saver = tf.train.Saver()
    img_mean = utils.global_var.means
    sess = tf.Session()

    writer = tf.summary.FileWriter("Log/", tf.get_default_graph())
    with sess.as_default():
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver.restore(sess, config_obj.result_addr)
        feed_dict = {images_test_p: images_test, labels_test_p: labels_test}

        [accu_TOTAL, predict_v] \
            = sess.run([accuracy_TOTAL, predict], feed_dict=feed_dict)
        print('len(predict_v):', len(predict_v))
        for k in range(len(predict_v)):
            # print(labels_test_v[k])
            mat_show = []
            for i in range(len(predict_v[k])):
                if predict_v[k][i] == 1:
                    mat_show.append(
                        data_scraping.materil_name_73.material_list[i])
                    print(str(data_scraping.materil_name_73.material_list[i]))
            print(predict_v[k])
            show_img = images_test[k]
            show_img = np.array(show_img)
            # show_img = show_img + img_mean
            # show_img = abs(show_img) / 256.0
            # show_img = map(abs, show_img)
            # show_img = [i/256.0 for i in show_img]
            # plt.imshow(show_img)
            plt.imshow(show_img)
            # zhfont = matplotlib.font_manager.FontProperties(
            # fname='/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc')
            for i in range(len(mat_show)):
                plt.text(150,
                         25 * (i + 1),
                         str(mat_show[i]),
                         fontproperties='Simhei',
                         fontsize=15,
                         color='red')
            plt.show()

        coord.request_stop()
        coord.join(threads)
    writer.close()
    show_cnnvis(sess, feed_dict=feed_dict, input_tensor=images_test_p)
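
read_a_pic_reconstruct_slim is not shown here; per the docstring it re-implements slim's preprocessing for one picture. The sketch below is one rough reading of it, with the Pillow-based resize, the mean subtraction via utils.global_var.means, and the dummy all-zero label as assumptions.

def read_a_pic_reconstruct_slim_sketch(pic_path, class_num):
    from PIL import Image
    img = Image.open(pic_path).convert('RGB').resize((224, 224))
    img = np.array(img, dtype=np.float32)
    img = img - np.array(utils.global_var.means, dtype=np.float32)  # per-channel mean subtraction
    images = img[np.newaxis, ...]                                   # shape [1, 224, 224, 3]
    labels = np.zeros((1, class_num), dtype=np.float32)             # dummy label vector
    return images, labels
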
def eval_smooth_divide(config_obj, des_path, repeat_time):
    print('read tfrecord from:', config_obj.tfrecord_test_addr)
    RIGHT_PATH = des_path + 'right/'
    PART_PATH = des_path + 'part/'
    ERROR_PATH = des_path + 'error/'
    if not os.path.exists(des_path):
        os.makedirs(des_path)
    if not os.path.exists(RIGHT_PATH):
        os.makedirs(RIGHT_PATH)
    if not os.path.exists(PART_PATH):
        os.makedirs(PART_PATH)
    if not os.path.exists(ERROR_PATH):
        os.makedirs(ERROR_PATH)

    test_preprocess_obj = data_preprocessing.test_preprocess.test_preprocess(
        config_obj.tfrecord_test_addr, config_obj.class_num)
    net_name = config_obj.net_type
    test_net_obj = None
    if net_name == 'vgg16':
        test_net_obj = net.vgg16.vgg16(False, 'vgg16', config_obj.class_num)
    elif net_name == 'mobilenet_v2':
        test_net_obj = net.mobilenet_v2.mobilenet_v2(False, 'mobilenet_v2',
                                                     config_obj.class_num)

    accu_name = config_obj.accuracy_type
    accu_obj = None
    if accu_name == 'default':
        accu_obj = default_accuracy.default_accuracy()
    elif accu_name == 'multi':
        accu_obj = multi_accuracy.multi_accuracy()

    images_test, labels_test = test_preprocess_obj.def_preposess()
    labels_right = labels_test
    net_test = test_net_obj.def_net(images_test)
    inputs = tf.sigmoid(net_test)
    predict = tf.cast(inputs > 0.5, tf.float32)
    accuracy_TOTAL = accu_obj.def_accuracy(net_test, labels_test)

    saver = tf.train.Saver()
    img_mean = utils.global_var.means
    sess = tf.Session()
    with sess.as_default():
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver.restore(sess, config_obj.result_addr)
        batch_size = 0
        for repeat_time_i in range(repeat_time):
            [accu_TOTAL, predict_v, labels_test_v, images_test_v
             ] = sess.run([accuracy_TOTAL, predict, labels_right, images_test])
            print('len(predict_v):', len(predict_v))
            batch_size = len(predict_v)
            for k in range(len(predict_v)):
                # print(labels_test_v[k])
                mat_predict = []
                mat_right = []
                for i in range(len(predict_v[k])):
                    if predict_v[k][i] == 1:
                        mat_predict.append(
                            data_scraping.materil_name_73.material_list[i])
                        print(
                            str(data_scraping.materil_name_73.material_list[i])
                        )
                print('===========')
                for m in range(len(labels_test_v[k])):
                    if labels_test_v[k][m] == 1:
                        mat_right.append(
                            data_scraping.materil_name_73.material_list[m])
                        print(
                            str(data_scraping.materil_name_73.material_list[m])
                        )
                print(predict_v[k])
                show_img = images_test_v[k]
                show_img = show_img + img_mean
                show_img = abs(show_img) / 256.0
                ret_num = is_right(predict_v[k], labels_test_v[k])
                if ret_num == RIGHT:
                    tag_pic_and_save(show_img, mat_predict, RIGHT_PATH,
                                     mat_right)
                elif ret_num == PART:
                    tag_pic_and_save(show_img, mat_predict, PART_PATH,
                                     mat_right)
                else:
                    tag_pic_and_save(show_img, mat_predict, ERROR_PATH,
                                     mat_right)
                #plt.show()
        coord.request_stop()
        coord.join(threads)
        print('eval done, batch_size: %d; repeat_time: %d; total eval: %d' %
              (batch_size, repeat_time, batch_size * repeat_time))
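
The helpers is_right and tag_pic_and_save (and the RIGHT/PART constants) come from elsewhere in the project. The sketch below shows one plausible reading of them (RIGHT when prediction and label match exactly, PART when they overlap, ERROR otherwise), with the constant values, text colors, and file naming as assumptions.

RIGHT, PART, ERROR = 0, 1, 2  # hypothetical constants

def is_right_sketch(predict_row, label_row):
    predicted = {i for i, v in enumerate(predict_row) if v == 1}
    truth = {i for i, v in enumerate(label_row) if v == 1}
    if predicted == truth:
        return RIGHT
    if predicted & truth:
        return PART
    return ERROR

def tag_pic_and_save_sketch(show_img, mat_predict, des_dir, mat_right):
    # Draw predicted names (red) and ground-truth names (green) on the image,
    # then write it to des_dir with a timestamp-based file name.
    import time
    plt.figure()
    plt.imshow(show_img)
    for i, name in enumerate(mat_predict):
        plt.text(150, 25 * (i + 1), str(name), fontsize=15, color='red')
    for i, name in enumerate(mat_right):
        plt.text(10, 25 * (i + 1), str(name), fontsize=15, color='green')
    plt.savefig(os.path.join(des_dir, '%d.png' % int(time.time() * 1000)))
    plt.close()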