Example #1
def predict_dog(model, predict_img=test_jinmao):
    img = Image.open(predict_img)
    test = np.asarray(img, dtype="float32")
    sample = np.empty((1, DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3))
    sample[0, :, :, :] = test.reshape(DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3)
    predictions = model.predict(sample)
    write_log(str(predictions), file=log_file)
Example #2
def load_model_and_evaluate(model_name):
    model = keras.models.load_model(os.path.join(test_6_MODEL, model_name))
    test_inputs, test_labels = create_test_samples()
    if model:
        test_loss, test_acc = model.evaluate(test_inputs, test_labels)
        write_log("loss:%s \n acc:%s" % (str(test_loss), str(test_acc)))
    return model
Example #3
def predict_dog(dog_pic_path: str, model_name=model_name):
    img = Image.open(os.path.join(test_dir, dog_pic_path))
    arr = np.asarray(img, dtype="float32").reshape(585, 460, 3)
    data = np.empty((1, 585, 460, 3), dtype="float32")
    data[0, :, :, :] = arr

    model = keras.models.load_model(os.path.join(test_6_MODEL, model_name))
    results = model.predict(data)
    write_log(str(results), file="_test_7.log")
Example #4
def test_model():
    dog_dir = [DOG_SUMU_DIR, DOG_JINMAO_DIR]
    pic_width = DOG_PIC_WIDTH
    pic_height = DOG_PIC_HEIGHT
    trains, labels = generate_trans_data(dog_dir, pic_width, pic_height)
    print(trains.shape, labels.shape)
    write_log(str(labels), file="test_8.log")
    model = DogsKindModl(pic_width, pic_height)
    model.compile_model()
    model.fit_model(trains, labels)
    model.save_model()
Example #5
def opencv_CascadeClassify():
    """
		使用OpenCV的人脸检测器进行人脸的初步检测,
		使用框架训练CNN网络进行人脸的二分类判定,
		将两部分合在一起完成人脸检测。
		此环节需注意根据应用场景调整参数,
		做到性能与召回率的平衡。
		
		args:
		returns:
			(x,y,w,h) type: tuple
			x,y 返回人脸矩形的左上角坐标
			w,y 返回矩形的宽和高

	"""

    for xml in os.listdir(haar_cascade_dir):
        xml = os.path.join(haar_cascade_dir, xml)
        # write_log(str(xml),file=lg)
        face_cascade = cv.CascadeClassifier(xml)
        # eye_cascade = cv.CascadeClassifier(harr_cascade_eye_xml)
        img = cv.imread(man_test_jpg)
        print(type(img))
        print(str(img.shape))
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        print(type(gray))
        write_log(str(type(img)), file=lg)
        write_log(str(img), file=lg)
        write_log(str(type(gray)), file=lg)
        write_log(str(gray), file=lg)
        cv.waitKey(0)

        faces = face_cascade.detectMultiScale(
            img,
            scaleFactor=1.1,
            minNeighbors=3,
            minSize=(20, 30),
            # flags=cv.cv.CV_HAAR_SCALE_IMAGE
        )
        print("length of faces is %d" % (len(faces)))
        if len(faces) == 0:
            # write_log("failed this file %s"%(xml),file=lg)
            continue
        else:
            # write_log("success this file %s"%(xml),file=lg)
            for x, y, w, h in faces:
                cv.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
                write_log(str(w) + str(h), file=lg)
            return faces
            # cv.imshow("myself.com",img)
            # cv.waitKey(0)
            # write_log(str(faces),file=lg)
            break
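The docstring above describes a two-stage pipeline: Haar cascade proposals followed by a CNN that accepts or rejects each candidate rectangle, yet the body only runs the cascade. A minimal sketch of the second stage, assuming a hypothetical binary face model saved as face_cnn.h5 that takes 64x64 BGR crops and a 0.5 decision threshold (none of these come from the original code):

import cv2 as cv
import numpy as np
from tensorflow import keras


def verify_faces_with_cnn(img, faces, model_path="face_cnn.h5", threshold=0.5):
    """Second-stage filter: keep only the Haar candidates the CNN accepts as faces."""
    model = keras.models.load_model(model_path)  # assumed binary face/non-face model
    confirmed = []
    for (x, y, w, h) in faces:
        crop = img[y:y + h, x:x + w]
        crop = cv.resize(crop, (64, 64)).astype("float32") / 255.0  # assumed model input size
        prob = model.predict(crop[np.newaxis, ...])[0]
        # With a 1-unit sigmoid output, prob[-1] is the face probability;
        # with a 2-unit softmax it is the probability of the second ("face") class.
        if float(prob[-1]) >= threshold:
            confirmed.append((x, y, w, h))
    return confirmed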
Example #6
def generate_model(model_name: str):

    train_size = len(os.listdir(DOG_SUMU_DIR)) + len(
        os.listdir(DOG_JINMAO_DIR))
    train_fi = len(os.listdir(DOG_JINMAO_DIR))
    # (72,585,460,3)
    trains = np.empty((train_size, DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3),
                      dtype="float32")
    labels = np.empty((train_size), dtype="int32")
    for ind in range(len(os.listdir(DOG_JINMAO_DIR))):
        img = Image.open(
            os.path.join(DOG_JINMAO_DIR,
                         os.listdir(DOG_JINMAO_DIR)[ind]))
        arr = np.asarray(img, dtype="float32")
        trains[ind, :, :, :] = arr.reshape(DOG_PIC_WIDTH, DOG_PIC_HEIGHT,
                                           3)  # (460,585,3)
        labels[ind] = 0

    for ind in range(len(os.listdir(DOG_SUMU_DIR))):
        img = Image.open(
            os.path.join(DOG_SUMU_DIR,
                         os.listdir(DOG_SUMU_DIR)
                         [ind]))  # tf.image.decode_jpeg(img,channels=3)
        arr = np.asarray(img, dtype="float32")
        trains[ind + train_fi, :, :, :] = arr.reshape(DOG_PIC_WIDTH,
                                                      DOG_PIC_HEIGHT, 3)
        labels[ind + train_fi] = 1

        # (74,460,585,3)
    write_log(str(labels), file=log_file)

    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3)),
        keras.layers.Dense(2,
                           activation=tf.nn.relu,
                           input_shape=(DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3)),
        keras.layers.Dense(2, activation=tf.nn.relu),
        keras.layers.Dense(2, activation=tf.nn.softmax),
    ])

    model.compile(
        optimizer=tf.train.AdamOptimizer(),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )

    # expect (585,460,3)
    model.fit(trains, labels, epochs=1, steps_per_epoch=20)
    model.save(test_6_MODEL + "/" + model_name)
Example #7
def predict_apicture(pic_path, model_path, dog_width, dog_height):
    """
    ARGS:
        pic_path:预测图片路径
        model_path:模型存储路径
    RETURNS:
        result: 分类标签名
    """
    img = Image.open(pic_path)
    arr = np.asarray(img, dtype="float32")
    data = np.empty((1, dog_width, dog_height, 3), dtype="float32")
    data[0, :, :, :] = arr.reshape(dog_width, dog_height, 3)

    from keras.models import load_model

    model = load_model(model_path)
    results = model.predict(data)
    write_log(str(results), file="test_8.log")
    pass
Example #8
def generate_model_then_predict_model(model_name: str, epochs=2):

    train_size = len(os.listdir(DOG_SUMU_DIR)) + len(
        os.listdir(DOG_JINMAO_DIR))
    train_fi = len(os.listdir(DOG_JINMAO_DIR))
    trains = np.empty((train_size, DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3),
                      dtype="float32")
    labels = np.empty((train_size), dtype="int32")

    for ind in range(len(os.listdir(DOG_JINMAO_DIR))):
        img = Image.open(
            os.path.join(DOG_JINMAO_DIR,
                         os.listdir(DOG_JINMAO_DIR)[ind]))
        arr = np.asarray(img, dtype="float32")
        trains[ind, :, :, :] = arr.reshape(DOG_PIC_WIDTH, DOG_PIC_HEIGHT,
                                           3)  # (460,585,3)
        labels[ind] = 0

    for ind in range(len(os.listdir(DOG_SUMU_DIR))):
        img = Image.open(
            os.path.join(DOG_SUMU_DIR,
                         os.listdir(DOG_SUMU_DIR)
                         [ind]))  # tf.image.decode_jpeg(img,channels=3)
        arr = np.asarray(img, dtype="float32")
        trains[ind + train_fi, :, :, :] = arr.reshape(DOG_PIC_WIDTH,
                                                      DOG_PIC_HEIGHT, 3)
        labels[ind + train_fi] = 1

    write_log(str(trains.shape), file=log_file)

    # The original snippet compiles `model` without ever building it; assume the
    # same small Sequential network used in Example #6.
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(DOG_PIC_WIDTH, DOG_PIC_HEIGHT, 3)),
        keras.layers.Dense(2, activation=tf.nn.relu),
        keras.layers.Dense(2, activation=tf.nn.softmax),
    ])

    model.compile(
        optimizer=tf.train.AdamOptimizer(),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )

    # model.fit(trains,epochs=epochs,steps_per_epoch=20)
    model.fit(trains, labels, epochs=epochs)
    model.save(test_6_MODEL + "/" + model_name)
Example #9
def my_kernel(x, y):
    """
    We create a custom kernel:

                 (2  0)
    k(x, y) = x  (    ) y.T
                 (0  1)
    """
    M = np.array([[2, 0], [0, 1.0]])
    return np.dot(np.dot(x, M), y.T)


h = 0.02  # step size in the mesh

# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
result = clf.fit(x, y)

write_log(str(type(clf)), file=log_file)
write_log(str(result), file=log_file)

x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1


xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
z = z.reshape(xx.shape)
plt.pcolormesh(xx, yy, z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=plt.cm.Paired, edgecolors="k")
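The ASCII matrix in the my_kernel docstring can be checked numerically: with M = [[2, 0], [0, 1]], the kernel of x = [1, 2] and y = [3, 4] is 2*1*3 + 1*2*4 = 14. A small sanity check reusing my_kernel from above (the test vectors are made up for illustration):

x_check = np.array([[1.0, 2.0]])
y_check = np.array([[3.0, 4.0]])
print(my_kernel(x_check, y_check))  # expected [[14.]]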
Example #10
For a colour image the gradient is computed on all three channels.
The magnitude at each pixel is then the largest magnitude over the three channels,
and the angle (direction) is the one belonging to that largest-magnitude channel
(a sketch of this per-pixel reduction follows the example).
The direction is the direction in which the pixel intensity changes.

https://blog.csdn.net/passball/article/details/82254256
https://blog.csdn.net/wjb820728252/article/details/78395092

"""

image = man_test_jpg
im = cv2.imread(image)
rgb = np.asarray(im)
hog = cv2.HOGDescriptor()
h = hog.compute(im)

gx = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=1)  # gradient of im along the x direction
gy = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=1)

mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)  # gradient magnitude and direction (angle)

write_log(str(mag), file=lg)
write_log(str(angle), file=lg)
write_log(str(mag.shape) + "size:" + str(mag.size), file=lg)
write_log(str(angle.shape) + "size:" + str(angle.size), file=lg)

# write_log(str(rgb.shape))
# write_log(str(h.shape))
# write_log(str(rgb.size))
write_log(str(h.size), file=lg)
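The note at the top of this example says that, for a colour image, the per-pixel HOG magnitude is the maximum over the three channels and the angle comes from that same channel, but the snippet only logs the raw 3-channel mag and angle arrays. A minimal sketch of that per-pixel reduction, reusing mag, angle, np, write_log, and lg from the snippet above:

best_channel = np.argmax(mag, axis=2)    # (H, W): index of the channel with the strongest gradient
rows, cols = np.indices(best_channel.shape)
max_mag = mag[rows, cols, best_channel]      # per-pixel maximum gradient magnitude
max_angle = angle[rows, cols, best_channel]  # angle taken from that same channel
write_log(str(max_mag.shape) + " " + str(max_angle.shape), file=lg)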
Example #11
# linear: <x, x'>
# polynomial: (gamma * <x, x'> + r)^d, where d is specified by keyword degree and r by coef0.
# rbf: exp(-gamma * ||x - x'||^2), where gamma is specified by keyword gamma and must be greater than 0.
# sigmoid: tanh(gamma * <x, x'> + r), where gamma is specified by keyword gamma and r by coef0.
# (a sketch using these keyword arguments follows the example)

# write_log(str(datas),file=lf)
# write_log(str(labels),file=lf)

# write_log(str(datas.shape),file=lf)
# write_log(str(labels.shape),file=lf)

cf = SVR(kernel="rbf")

print(datas.size, datas.shape)
datas = datas.reshape((4, 220 * 220 * 3))
write_log(str(datas[0]), file=lf)
write_log(str(datas[0].shape), file=lf)

labels = ["1", "2", "3", "4"]
cf.fit(datas, labels)

result = cf.predict(datas[0:])

write_log(str(result), file=lf)

# avr = avr/4
# avr = avr.astype("uint8")
# avr_im = Image.fromarray(avr)
# avr_im.show()
# print(avr.shape)
# print(avr.size)
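The kernel formulas listed in the comments at the top of this example map onto the degree, gamma, and coef0 keyword arguments of the scikit-learn estimators. A small self-contained sketch showing how each kernel is selected; the toy data and parameter values are invented purely for illustration:

import numpy as np
from sklearn.svm import SVR

X = np.random.rand(20, 3)  # toy data, illustration only
y = np.random.rand(20)

linear = SVR(kernel="linear")                          # <x, x'>
poly = SVR(kernel="poly", degree=3, coef0=1.0)         # (gamma * <x, x'> + coef0)^degree
rbf = SVR(kernel="rbf", gamma=0.1)                     # exp(-gamma * ||x - x'||^2), gamma > 0
sigmoid = SVR(kernel="sigmoid", gamma=0.1, coef0=0.0)  # tanh(gamma * <x, x'> + coef0)

for name, est in [("linear", linear), ("poly", poly), ("rbf", rbf), ("sigmoid", sigmoid)]:
    est.fit(X, y)
    print(name, est.predict(X[:2]))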
Example #12
import cv2 as cv

from __init__ import write_log, you_jpg

lg = "_test_cnn_9_9.log"

img = cv.imread(you_jpg)

hog = cv.HOGDescriptor()
# detect()/detectMultiScale() need an SVM detector to be set; use OpenCV's built-in people detector.
hog.setSVMDetector(cv.HOGDescriptor_getDefaultPeopleDetector())
foundLocations, foundWeights = hog.detectMultiScale(img)
foundLocations_d, weights = hog.detect(img)
descriptors = hog.compute(img)

write_log(str(foundLocations), file=lg)
write_log(str(foundLocations_d), file=lg)
write_log(str(descriptors), file=lg)
Example #13
from sklearn import datasets

from __init__ import write_log

log_file = "_test_cnn_9_4.log"

diabetes = datasets.load_diabetes()

write_log(str(len(diabetes.data[0])), file=log_file)
Example #14
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]

fashion_mnist = keras.datasets.fashion_mnist
(x, y), (x_t, y_t) = fashion_mnist.load_data()
x = x / 255.0
x_t = x_t / 255.0

write_log(str(x), file="_test_5.log")
write_log(str(y), file="_test_5.log")
write_log(str(x_t), file="_test_5.log")

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax),
])

model.compile(
    optimizer=tf.train.AdamOptimizer(),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
Example #15
In scikit-learn there are two AdaBoost estimators: AdaBoostRegressor (regression) and AdaBoostClassifier (classification).
When tuning AdaBoostClassifier, the work falls into two parts: 1) tuning the AdaBoost framework; 2) tuning the weak classifier.
"""

from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier

# imports
from sklearn.model_selection import cross_val_score

from __init__ import write_log

log_file = "test_cnn_9_1.log"
# Load the iris dataset bundled with sklearn
iris = load_iris()
write_log(str(iris), file=log_file)
"""
AdaBoostClassifier参数解释
base_estimator:弱分类器,默认是CART分类树:DecisionTressClassifier
algorithm:在scikit-learn实现了两种AdaBoost分类算法,即SAMME和SAMME.R,
           SAMME就是原理篇介绍到的AdaBoost算法,指Discrete AdaBoost
           SAMME.R指Real AdaBoost,返回值不再是离散的类型,而是一个表示概率的实数值,算法流程见后文
                            两者的主要区别是弱分类器权重的度量,SAMME使用了分类效果作为弱分类器权重,SAMME.R使用了预测概率作为弱分类器权重。
           SAMME.R的迭代一般比SAMME快,默认算法是SAMME.R。因此,base_estimator必须使用支持概率预测的分类器。
loss:这个只在回归中用到,不解释了
n_estimator:最大迭代次数,默认50。在实际调参过程中,常常将n_estimator和学习率learning_rate一起考虑
learning_rate:每个弱分类器的权重缩减系数v。f_k(x)=f_{k-1}*a_k*G_k(x)。较小的v意味着更多的迭代次数,默认是1,也就是v不发挥作用。
另外的弱分类器的调参,弱分类器不同则参数不同,这里不详细叙述
"""
# Build the model
clf = AdaBoostClassifier(n_estimators=100)  # use 100 weak classifiers
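The docstring above walks through the main AdaBoostClassifier parameters, and cross_val_score is imported but never called in the snippet. A minimal sketch that spells those parameters out and scores the model on iris; the specific values (stump depth, learning_rate=0.5, cv=5) are assumptions for illustration, and base_estimator has been renamed to estimator in newer scikit-learn releases:

from sklearn.tree import DecisionTreeClassifier

# Weak learner, shrinkage, and algorithm made explicit, mirroring the parameter notes above.
clf_tuned = AdaBoostClassifier(
    base_estimator=DecisionTreeClassifier(max_depth=1),  # the default CART stump, spelled out
    n_estimators=100,
    learning_rate=0.5,    # smaller shrinkage usually calls for more estimators
    algorithm="SAMME.R",  # default; requires a base estimator with predict_proba
)

scores = cross_val_score(clf_tuned, iris.data, iris.target, cv=5)
write_log("adaboost cv scores: %s" % str(scores), file=log_file)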