示例#1
0
def get_data():
    """Extract NPD features for every face and non-face image and pickle them.

    Reads all images under ./datasets/original/face/ and
    ./datasets/original/nonface/, converts each to a 24x24 grayscale float
    array, extracts its NPD feature vector, and dumps the combined list of
    feature vectors (faces first, then non-faces) to 'feature.txt'.
    """
    def _extract_dir(directory):
        # Feature vectors for every image file in *directory*.
        feats = []
        for name in os.listdir(directory):
            im = Image.open(directory + name).convert('L')  # grayscale
            arr = np.array(im).astype(float)
            # NOTE(review): scipy's imresize is deprecated and removed in
            # modern SciPy; consider im.resize((24, 24)) on the PIL image.
            arr = imresize(arr, (24, 24))
            feats.append(NPDFeature(arr).extract())
        return feats

    features = _extract_dir('./datasets/original/face/')
    features += _extract_dir('./datasets/original/nonface/')

    # 'with' closes the file even if dump() raises (original used a bare
    # open/close pair and shadowed the builtin name 'List').
    with open('feature.txt', 'wb') as f:
        pickle.dump(features, f)
示例#2
0
def mk_dataset():
    """Build the train and validate NPD-feature matrices and save them.

    Each set holds 250 faces followed by 250 non-faces; features are
    stored via AdaBoostClassifier.save under the names 'train' and
    'validate'.
    """
    face_dir = "/Users/zoushuai/Python/lab/datasets/original/face"
    nonface_dir = "/Users/zoushuai/Python/lab/datasets/original/nonface"
    faces = resize_image(face_dir)
    nonfaces = resize_image(nonface_dir)

    # First 250 images of each class train; the next 250 validate.
    train_set = np.array(faces[0:250] + nonfaces[0:250])
    validate_set = np.array(faces[250:500] + nonfaces[250:500])

    train_features = []
    validate_features = []
    for i in range(500):
        train_features.append(NPDFeature(train_set[i]).extract())
        validate_features.append(NPDFeature(validate_set[i]).extract())

    AdaBoostClassifier.save(np.array(train_features), 'train')
    AdaBoostClassifier.save(np.array(validate_features), 'validate')
示例#3
0
def getFeature(Path, savePath):
    """Recursively extract NPD features for every image under *Path*.

    Each image's feature vector is pickled to
    savePath/<parent-dir-name>/<image-stem>; files whose dump already
    exists are skipped.
    """
    for file in os.listdir(Path):
        file_abs_path = os.path.join(Path, file)
        if os.path.isdir(file_abs_path):
            # Recurse into subdirectories (e.g. face/ and nonface/).
            getFeature(file_abs_path, savePath)
        if os.path.isfile(file_abs_path):
            # isfile() already implies existence, so the original's extra
            # os.path.exists() check was redundant.
            im = np.array(Image.open(file_abs_path))
            pic_features = NPDFeature(im).extract()
            print("pic_features", pic_features.shape)

            # Mirror the source layout: savePath/<class dir>/<image stem>.
            parent_path_name = os.path.dirname(file_abs_path).split("/")[-1]
            save_path = os.path.join(savePath, parent_path_name,
                                     os.path.splitext(file)[0])

            if not os.path.exists(save_path):
                # Ensure the target directory exists before writing
                # (the original crashed when it was missing).
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                # 'with' closes the handle the original leaked; the
                # 'PROROCOL' typo is gone — protocol 0 is kept as-is.
                with open(save_path, "wb") as output:
                    pickle.dump(pic_features, output, 0)
示例#4
0
def readimg():
    """Read 500 face and 500 non-face images, appending their NPD features
    and labels (+1 face, -1 non-face) to the module-level lists
    `feature` and `label`."""
    # Same pipeline for both classes; only the path prefix and label differ.
    for prefix, lab in (('./datasets/original/face/face_', 1),
                        ('./datasets/original/nonface/nonface_', -1)):
        for i in range(0, 500):
            img = mpimg.imread(prefix + "{:0>3d}".format(i) + ".jpg")
            gray = rgb2gray(img)
            feature.append(NPDFeature(gray).extract())
            label.append(lab)
示例#5
0
def extract_to_list(path_from):
    """Return a list of NPD feature vectors, one per image file in *path_from*."""
    result = []
    for name in os.listdir(path_from):
        # os.path.join is portable; the original hard-coded '/' separators.
        img = Image.open(os.path.join(path_from, name))
        result.append(NPDFeature(np.array(img)).extract())
    return result
示例#6
0
def trainX():
    """Build the training matrix: NPD features of 100 faces then 50 non-faces.

    Returns an array of shape (150, 165600); rows 0-99 are faces,
    rows 100-149 are non-faces.
    """
    def _load_features(dir_name, count):
        # Load images dir_name + 'NNN.jpg' for NNN in 000..count-1 as
        # 24x24 grayscale and stack their NPD feature vectors.
        feats = []
        for i in range(count):
            im = Image.open(dir_name + str(i).zfill(3) + '.jpg').convert('L')
            im = array(im.resize((24, 24)))
            feats.append(NPDFeature(im).extract())
        # One stack at the end replaces the original's quadratic
        # repeated append() onto a flat array.
        return array(feats).reshape(count, 165600)

    f_list = _load_features('datasets/original/face/face_', 100)
    fn_list = _load_features('datasets/original/nonface/nonface_', 50)
    # Faces first, then non-faces — same row order as the original.
    return concatenate((f_list, fn_list), axis=0)
示例#7
0
def get_npdArray_from_diskImg(pathDir):
    """Return an array of NPD feature vectors for every image in *pathDir*.

    *pathDir* must end with a path separator, since file names are
    appended by plain string concatenation.
    """
    feats = []  # renamed: the original shadowed the builtin 'list'
    for file_name in os.listdir(pathDir):
        img = Image.open('%s%s' % (pathDir, file_name))
        # Resize first, then grayscale — same order as the original.
        img = np.array(img.resize((24, 24)).convert('L'))
        feats.append(NPDFeature(img).extract())
    return np.array(feats)
示例#8
0
def testX():
    """Build the test matrix: NPD features of faces 100-149 and non-faces 50-74.

    Returns an array of shape (75, 165600); rows 0-49 are faces,
    rows 50-74 are non-faces.
    """
    def _load_features(dir_name, start, stop):
        # Load images dir_name + 'NNN.jpg' for NNN in start..stop-1 as
        # 24x24 grayscale and stack their NPD feature vectors.
        feats = []
        for i in range(start, stop):
            im = Image.open(dir_name + str(i).zfill(3) + '.jpg').convert('L')
            im = array(im.resize((24, 24)))
            feats.append(NPDFeature(im).extract())
        # Single stack instead of the original's quadratic append() loop.
        return array(feats).reshape(stop - start, 165600)

    f_list_test = _load_features('datasets/original/face/face_', 100, 150)
    fn_list_test = _load_features('datasets/original/nonface/nonface_', 50, 75)
    return concatenate((f_list_test, fn_list_test), axis=0)
示例#9
0
def preprocess_image():
    """Extract NPD features for 500 face and 500 non-face images, one pickle each.

    Reads datasets\\original\\<class>\\<class>_NNN.jpg, converts each to a
    24x24 grayscale array, and saves its feature vector to
    datasets\\features\\<class>NNN.pickle via save_data().
    """
    # exist_ok avoids the original's crash when the folder already exists.
    os.makedirs("datasets\\features", exist_ok=True)

    # Identical pipeline for both classes; only the class name differs.
    for kind in ("face", "nonface"):
        for i in range(500):
            fetch_address = "datasets\\original\\%s\\%s_%03d.jpg" % (kind, kind, i)
            # 24x24 grayscale image as a numpy array.
            xi = np.array(Image.open(fetch_address).convert('L').resize((24, 24)))
            feature = NPDFeature(xi).extract()
            # Note: output names have no underscore (face000.pickle),
            # matching the original.
            filename = "datasets\\features\\%s%03d.pickle" % (kind, i)
            save_data(filename, feature)
示例#10
0
def get_feature(path):
    """Concatenate the NPD features of every image in *path* into one flat array.

    *path* must end with a path separator, since file names are appended
    directly.
    """
    feature_list = []
    files = os.listdir(path)
    for name in files:
        im = Image.open(path + name)
        image = numpy.ones(shape=(24, 24), dtype=int)
        for i in range(24):
            for j in range(24):
                # NOTE(review): getpixel takes (x, y) = (col, row), so
                # image[i][j] = getpixel((i, j)) transposes the picture —
                # confirm this is intended before changing it.
                image[i][j] = im.getpixel((i, j))
        feature_list.append(NPDFeature(image).extract())
    # One concatenate at the end replaces the original's quadratic
    # re-allocation on every iteration; the leading empty float array
    # preserves the original's dtype promotion and empty-dir result.
    return numpy.concatenate([numpy.array([])] + feature_list)
示例#11
0
def exact_nonface():
    """Extract NPD features for 500 pre-grayscaled non-face images and save
    them (plus -1 labels) as nonfeature.npy / nonlabel.npy."""
    feature = []
    label = []
    # Images are named non0.jpg .. non499.jpg in the grayscale folder.
    for idx in range(500):
        img = np.array(Image.open("/home/kodgv/第三次实验/ML2017-lab-03/huidutu/non" + str(idx) + ".jpg"))
        label.append(-1)
        feature.append(list(NPDFeature(img).extract()))
    np.save("nonfeature.npy", np.array(feature))
    # Labels are stored as a single row vector of shape (1, 500).
    np.save("nonlabel.npy", np.array(label).reshape(1, len(label)))
示例#12
0
    def Feature_extract():
        """Extract NPD features for 500 face/non-face image pairs into an
        interleaved (1000, 165600) matrix (even rows faces, odd rows
        non-faces) and save it as 'train.txt'."""
        face_path = '.\\datasets\\original\\face\\face_%03d.jpg'
        nonface_path = '.\\datasets\\original\\nonface\\nonface_%03d.jpg'
        faces_path = [face_path % i for i in range(500)]
        nonfaces_path = [nonface_path % i for i in range(500)]

        train = np.zeros((1000, 165600))
        for i in range(500):
            # Row 2i holds the i-th face, row 2i+1 the i-th non-face.
            for offset, img_path in ((0, faces_path[i]), (1, nonfaces_path[i])):
                img = Image.open(img_path).convert('L').resize((24, 24))
                train[i * 2 + offset] = NPDFeature(np.array(img)).extract()
        AdaBoostClassifier.save(train, 'train.txt')
示例#13
0
def load_data():
    """Return a list of 1-D arrays: each is an image's NPD feature vector
    with its class label appended (+1 face, -1 non-face).

    Faces come first, in os.listdir order, then non-faces.
    """
    face_path = u'C:/Users/47864/Desktop/Data/datasets/original/face'
    nonface_path = u'C:/Users/47864/Desktop/Data/datasets/original/nonface'

    dataset = []
    # Same pipeline for both classes; only the directory and label differ.
    for path, lab in ((face_path, 1), (nonface_path, -1)):
        for name in os.listdir(path):
            img = Image.open(path + '/' + name).convert('L')
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the
            # same filter under its current name.
            img = img.resize((24, 24), Image.LANCZOS)
            feat = NPDFeature(np.array(img)).extract()
            dataset.append(np.concatenate((feat, np.array([lab]))))

    return dataset
示例#14
0
def extract_fea(img_dirs, img_labels, store_name):
    """Preprocess images to 24x24 grayscale, extract NPD features, and
    pickle the (feature, label) pairs.

    img_dirs    -- list of image file paths.
    img_labels  -- label for each image (parallel to img_dirs).
    store_name  -- path of the output pickle file.
    """
    fea_list = []
    for i in range(len(img_dirs)):
        gray = color.rgb2gray(io.imread(img_dirs[i]))
        # NPDFeature is fed uint8 pixels, hence img_as_ubyte.
        resized = img_as_ubyte(transform.resize(gray, (24, 24)))
        fea_list.append((NPDFeature(resized).extract(), img_labels[i]))
    # 'with' guarantees the file closes even if dump() raises
    # (the original leaked the handle on error).
    with open(store_name, 'wb') as o_file:
        pickle.dump(fea_list, o_file, -1)
示例#15
0
def read_data():
    """Load 500 face (+1) and 500 non-face (-1) images as 16x16 grayscale
    and return (NPD feature list, label list)."""
    X = []
    y = []
    # Same loading loop for both classes; only the path helper and label differ.
    for path_of, lab in ((get_i_face_image_path, 1),
                         (get_i_nonface_image_path, -1)):
        for i in range(500):
            image = Image.open(path_of(i)).convert('L').resize((16, 16))
            X.append(image)
            y.append(lab)

    feature = []
    for i in tqdm(range(len(X)), desc='pre_train', leave=True):
        feature.append(NPDFeature(np.array(X[i])).extract())
    return feature, y
示例#16
0
def extra_img_features():
    """Extract an NPD feature vector for every image in the global `img`
    list, appending each to the global `img_features` list."""
    for image in img:
        img_features.append(NPDFeature(image).extract())