Example #1
import numpy as np
from utils import file_processing


def load_dataset(dataset_path, filename):
    '''
    Load the face database.
    :param dataset_path: path to the embedding .npy file (faceEmbedding.npy)
    :param filename: path to the labels file (name.txt)
    :return:
    '''
    embeddings = np.load(dataset_path)
    names_list = file_processing.read_data(filename, split=None, convertNum=False)
    return embeddings, names_list
def load_dataset(dataset_path, filename):
    '''
    Load the face database.
    :param dataset_path: path to the embedding .npy file (faceEmbedding.npy)
    :param filename: path to the labels file (name.txt)
    :return:
    '''
    compare_emb = np.load(dataset_path)
    names_list = file_processing.read_data(filename, split=False)
    return compare_emb, names_list
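A minimal usage sketch of the loaded database, assuming the rows of compare_emb are face embeddings comparable by Euclidean distance; find_best_match, query_emb, and the threshold value are illustrative names, not part of the original code.

import numpy as np

def find_best_match(query_emb, compare_emb, names_list, threshold=1.0):
    # distance from the query embedding to every stored embedding
    dists = np.linalg.norm(compare_emb - query_emb, axis=1)
    best = int(np.argmin(dists))
    # reject the match if even the nearest embedding is too far away
    if dists[best] > threshold:
        return "unknown", float(dists[best])
    return names_list[best], float(dists[best])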
Example #3
def label_test(image_dir, filename, class_names):
    basename = os.path.basename(filename)[:-len('.txt')] + ".bmp"
    image_path = os.path.join(image_dir, basename)
    image = image_processing.read_image_gbk(image_path)
    data = file_processing.read_data(filename, split=" ")
    label_list, rect_list = file_processing.split_list(data, split_index=1)
    label_list = [l[0] for l in label_list]
    name_list = file_processing.decode_label(label_list, class_names)
    image_processing.show_image_rects_text("object2", image, rect_list,
                                           name_list)
def read_pair_data(filename, split=True):
    content_list = file_processing.read_data(filename)
    if split:
        content_list = np.asarray(content_list)
        faces_list1 = content_list[:, :1].reshape(-1)
        faces_list2 = content_list[:, 1:2].reshape(-1)
        # convert to 0/1
        # np.int was removed in recent NumPy releases; the builtin int is the documented replacement
        issames_data = np.asarray(content_list[:, 2:3].reshape(-1), dtype=int)
        issames_data = np.where(issames_data > 0, 1, 0)
        faces_list1 = faces_list1.tolist()
        faces_list2 = faces_list2.tolist()
        issames_data = issames_data.tolist()
        return faces_list1, faces_list2, issames_data
    return content_list
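A hedged usage sketch of read_pair_data. The pairs-file layout (two image names plus a 0/1 same-person flag per line) is inferred from the column slicing above; pairs.txt and its contents are hypothetical, and the project's file_processing module is assumed to be importable.

# pairs.txt (hypothetical contents), one pair per line:
#   personA_001.jpg personA_002.jpg 1
#   personA_001.jpg personB_003.jpg 0
faces1, faces2, issames = read_pair_data("pairs.txt", split=True)
for f1, f2, same in zip(faces1, faces2, issames):
    print(f1, f2, "same person" if same else "different people")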
def label_test(image_dir, filename, class_names=None):
    basename = os.path.basename(filename)[:-len('.txt')] + ".jpg"
    image_path = os.path.join(image_dir, basename)
    image = image_processing.read_image(image_path)
    data = file_processing.read_data(filename, split=" ")
    label_list, rect_list = file_processing.split_list(data, split_index=1)
    label_list = [l[0] for l in label_list]
    if class_names:
        name_list = file_processing.decode_label(label_list, class_names)
    else:
        name_list = label_list
    show_info = ["id:" + str(n) for n in name_list]
    rgb_image = image_processing.show_image_rects_text("object2",
                                                       image,
                                                       rect_list,
                                                       show_info,
                                                       color=(0, 0, 255),
                                                       drawType="custom",
                                                       waitKey=1)
    rgb_image = image_processing.resize_image(rgb_image, 900)
    image_processing.cv_show_image("object2", rgb_image)
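file_processing.decode_label itself is not shown in these examples; below is a minimal sketch of the behaviour they appear to rely on (looking up each numeric label in class_names), under that assumption.

def decode_label_sketch(label_list, class_names):
    # map each numeric label (read as a string from the .txt file) to its class name
    return [class_names[int(label)] for label in label_list]

print(decode_label_sketch(["0", "2"], ["person", "car", "dog"]))  # ['person', 'dog']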
Example #6
import hashlib
import os
import os.path as osp

from utils import file_processing

md5sum = hashlib.md5


def create_md5sum(image_dir, totle, md5sum_file):
    with open(md5sum_file, 'w') as f:
        for line in totle:
            image_name = line[0]
            image_path = os.path.join(image_dir, image_name)
            if not os.path.exists(image_path):
                print("no path:{}".format(image_path))
                continue
            # hash the raw image bytes and record "<name> <md5>" per line
            with open(image_path, 'rb') as img_f:
                img = img_f.read()
            md5 = md5sum(img).hexdigest()
            f.write(image_name + ' ' + str(md5) + '\n')
            print(image_name + ' ' + str(md5))


if __name__ == "__main__":
    filename = "/media/dm/dm/project/dataset/COCO/HumanPose/teacher_2D_pose_estimator/list/val.txt"
    image_dir = "/media/dm/dm/project/dataset/COCO/HumanPose/teacher_2D_pose_estimator/list/val"
    md5sum_file = "val_md5sum.txt"
    data = file_processing.read_data(filename)
    create_md5sum(image_dir, data, md5sum_file)
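A companion sketch that re-reads the file written by create_md5sum and re-hashes every listed image to detect corrupted or modified files; verify_md5sum is an illustrative name, not part of the original code.

def verify_md5sum(image_dir, md5sum_file):
    with open(md5sum_file) as f:
        for line in f:
            image_name, expected = line.split()
            image_path = os.path.join(image_dir, image_name)
            with open(image_path, 'rb') as img_f:
                actual = md5sum(img_f.read()).hexdigest()
            if actual != expected:
                print("checksum mismatch:{}".format(image_name))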
def read_label(filename):
    boxes_label_lists = file_processing.read_data(filename)
    return boxes_label_lists
Example #8
    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))
    for image_path in images_list:
        im = image_processing.read_image(image_path,
                                         image_height,
                                         image_width,
                                         normalization=True)
        im = im[np.newaxis, :]
        # pred = sess.run(f_cls, feed_dict={x:im, keep_prob:1.0})
        pre_id, predict_, max_score_ = sess.run(
            [max_idx_p, predict, max_score], feed_dict={X: im})
        print("{}:pre_id:{},predict:{},max_score:{}".format(
            image_path, pre_id, predict_, max_score_))
    sess.close()


if __name__ == '__main__':
    # load the character set
    label_filename = './dataset/label_char_set.txt'
    char_set = file_processing.read_data(label_filename)
    # #
    batch_size = 64
    image_height = 60
    image_width = 160
    depth = 3
    captcha_size = 4
    models_path = 'models/model-4500'
    image_dir = './dataset/test'

    predict(models_path, image_dir, image_height, image_width, depth, char_set,
            captcha_size)
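A hedged sketch of turning the predicted indices back into the captcha string, assuming pre_id has shape (1, captcha_size) and char_set is a flat list of characters; the values below are made up for illustration.

char_set = list("0123456789")       # illustrative character set
pre_id = [[3, 0, 7, 2]]             # illustrative model output for one image
captcha_text = "".join(char_set[i] for i in pre_id[0])
print(captcha_text)                 # -> 3072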
def load_dataset(dataset_path, filename):
    # dataset_path: path to the feature embedding file (faceEmbedding.npy)
    # filename: path to the labels file (name.txt)
    compare_emb = np.load(dataset_path)
    names_list = file_processing.read_data(filename)
    return compare_emb, names_list