Example #1
def detect_face(path):
    img = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)
    img = utils.img_resize(img)
    height, width = img.shape[0:2]
    print(height, width)
    print("========== detect face ===========")
    for i in range(3):
        img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        front_face_detected = frontFaceClassifier.detectMultiScale(
            img_grey,
            scaleFactor=1.2,
            minNeighbors=5,
            flags=cv2.CASCADE_SCALE_IMAGE)

        if len(front_face_detected) != 0:
            print(path)
            print("rotate " + str(i) + " times")
            print(img.shape)
            for x, y, w, h in front_face_detected:
                print(x, y, w, h)
            #     cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
            # show(img, 0)
            if i != 0:
                cv2.imencode('.jpg', img)[1].tofile(path)
            return True
        else:
            img = utils.rotate_image(img, 90)
    return False
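utils.img_resize and utils.rotate_image are project-specific helpers that are not part of these excerpts, and their signatures vary between examples (compare the calls in examples #1, #5 and #10). A minimal sketch of what the single-argument form used here could look like, assuming img_resize caps the longer edge at a fixed maximum and rotate_image turns the image in 90-degree steps; the max_edge default is an assumption, not taken from the source:

import cv2
import numpy as np

def img_resize(img, max_edge=1000):
    """Assumed behaviour: shrink img so that its longer edge is at most max_edge."""
    h, w = img.shape[:2]
    scale = max_edge / max(h, w)
    if scale >= 1:
        return img
    return cv2.resize(img, (int(w * scale), int(h * scale)),
                      interpolation=cv2.INTER_AREA)

def rotate_image(img, angle):
    """Assumed behaviour: rotate counter-clockwise by a multiple of 90 degrees."""
    k = (angle // 90) % 4
    return np.ascontiguousarray(np.rot90(img, k))

With helpers like these, the loop above tries the face detector on the original orientation and then on two further 90-degree rotations before giving up.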
Example #2
    def run(self):
        self.state_running.set()
        self.cap = cv2.VideoCapture(self.read_file_path)
        if not self.cap.isOpened():
            messagebox.showerror(title="Error", message="Failed to open the video")
            self.cap.release()
            return

        cur_frame_index = 0
        while True:
            self.state_no_pause.wait()  # for pause
            if not self.state_running.is_set():  # for stop
                self.cap.release()
                break
            # main processing section
            ret, frame = self.cap.read()
            if ret:
                cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                img = Image.fromarray(cv2image)
                img = utils.img_resize(self.APP.label_width,
                                       self.APP.label_height, img)
                img_tk = ImageTk.PhotoImage(image=img)
                self.APP.refresh_main_label(photo=img_tk)
                self.APP.app_refresh_cur_frame_count(cur_frame_index)
                cur_frame_index += 1

            else:  # normal end: the video has been fully read
                self.state_running.clear()
                self.cap.release()
                messagebox.showinfo("Read video", "Finished reading the video!")
                self.APP.app_refresh()
                break
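state_running and state_no_pause behave like threading.Event objects: wait() blocks the reader while paused, and clearing state_running makes it release the capture and exit. A minimal sketch of how the owning GUI code might drive those flags; the PlayerControls name and method names are assumptions, not from the original project:

import threading

class PlayerControls:
    """Hypothetical owner of the two Event flags polled by the reader thread."""

    def __init__(self):
        self.state_running = threading.Event()
        self.state_no_pause = threading.Event()
        self.state_no_pause.set()      # start in the un-paused state

    def pause(self):
        self.state_no_pause.clear()    # run() blocks on wait()

    def resume(self):
        self.state_no_pause.set()

    def stop(self):
        self.state_running.clear()     # run() sees this flag and breaks out
        self.state_no_pause.set()      # wake the thread if it is currently paused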
Example #3
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']

        # if the user does not select a file, the browser also
        # submits an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            path_name, ext = filename.rsplit('.', 1)
            in_memory_file = io.BytesIO()
            file.save(in_memory_file)
            data = np.frombuffer(in_memory_file.getvalue(), dtype=np.uint8)
            img = img_resize(data)

            if ext.lower() not in ("jpg", "jpeg"):
                filename = path_name + '.jpg'

            cv2.imwrite(os.path.join(app.config['UPLOAD_FOLDER'], filename),
                        img, [int(cv2.IMWRITE_JPEG_QUALITY), 20])

            return redirect(url_for('uploaded_file', filename=filename))
    return '''
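In this project img_resize is applied directly to the byte buffer of the upload, which suggests the helper decodes the image itself before resizing. A minimal sketch of that assumed behaviour, using cv2.imdecode on the in-memory data; the max_edge default is an assumption, not from the source:

import cv2
import numpy as np

def img_resize(data, max_edge=900):
    """Assumed behaviour: decode encoded image bytes, then cap the longer edge."""
    buf = np.frombuffer(data, dtype=np.uint8) if isinstance(data, (bytes, bytearray)) else data
    img = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    h, w = img.shape[:2]
    scale = min(1.0, max_edge / max(h, w))
    return cv2.resize(img, (int(w * scale), int(h * scale)))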
Example #4
def recognize_information(path, config):
    image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), 1)
    image = utils.img_resize(image)
    print("\n-----------------------------------")
    print(path)
    print(image.shape)
    for i in range(3):
        print("+++" + str(i) + "+++")
        if config == 1:
            imagegray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            retval, img = cv2.threshold(imagegray, 120, 255,
                                        cv2.THRESH_OTSU + cv2.THRESH_BINARY)
        elif config == 2:
            img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        elif config == 3:
            img = image
        utils.show(img, 1)
        text = pytesseract.image_to_string(img, lang='chi_sim')
        print(text)
        if text.find("学 位") != -1 or text.find("学 士") != -1:  # degree-certificate keywords
            print("=====degree certificate=====")
            # cv2.imencode('.jpg', image)[1].tofile(path)
            # utils.show(img, 1)
            return 1
        elif text.find("人 力") != -1 or text.find("资 源") != -1 or text.find("人 事") != -1 \
                or text.find("工 作") != -1 or text.find("甲 方") != -1 or text.find("劳 动") != -1 \
                or text.find("双 方") != -1 or text.find("符 合") != -1 or text.find("知 识") != -1:  # HR / labour-contract keywords
            return 5
        elif text.find("成 人") != -1 or text.find("高 等") != -1 or text.find("毕 业") != -1 \
                or text.find("合 格") != -1 or text.find("半 业") != -1 or text.find("课 程") != -1 \
                or text.find("单 业") != -1 or text.find("毗 业") != -1 or text.find("注 册") != -1:  # diploma keywords (some are OCR misreadings)
            print("=====diploma=====")
            # cv2.imencode('.jpg', image)[1].tofile(path)
            # utils.show(img, 1)
            return 2
        elif text.find("合 同") != -1:
            return 4
        elif text.find("居 民") != -1 or text.find("公 民") != -1 or text.find("民 身") != -1 \
                or text.find("份 证") != -1 or text.find("身 休") != -1 or text.find("休 证") != -1:  # ID-card keywords (some are OCR misreadings)
            print("=====ID card=====")
            cv2.imencode('.jpg', image)[1].tofile(path)
            utils.show(img, 1)
            return 3
        else:
            image = utils.rotate_image(image, 90)
            utils.show(img, 1)

    print("=====----=====")
    return -1
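The elif chain above maps spaced-out OCR tokens to a document class: 1 for a degree certificate, 2 for a diploma, 3 for an ID card, 4 for a contract, 5 for an employment/HR document, and -1 when nothing matches. The same lookup can be written as a keyword table; this is a hedged refactoring sketch that keeps the source's keywords and return codes, not the original author's code:

# Return codes as used above: 1 degree certificate, 2 diploma, 3 ID card,
# 4 contract, 5 employment/HR document, -1 unrecognised.
# Some keywords ("半 业", "毗 业", "身 休", ...) are deliberate OCR misreadings.
KEYWORD_CLASSES = [
    (1, ["学 位", "学 士"]),
    (5, ["人 力", "资 源", "人 事", "工 作", "甲 方", "劳 动", "双 方", "符 合", "知 识"]),
    (2, ["成 人", "高 等", "毕 业", "合 格", "半 业", "课 程", "单 业", "毗 业", "注 册"]),
    (4, ["合 同"]),
    (3, ["居 民", "公 民", "民 身", "份 证", "身 休", "休 证"]),
]

def classify_text(text):
    # Order matters: the tuples are checked in the same order as the elif chain.
    for label, keywords in KEYWORD_CLASSES:
        if any(keyword in text for keyword in keywords):
            return label
    return -1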
Example #5
def crop_main(img_path):
    # base_path=img_path.split("/")
    img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)
    img = utils.img_resize(img, 900)
    binary_img = utils.getCanny(img, 20, 50, 3, 0)
    # max_contour, max_area = utils.findMaxContour(binary_img)
    contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        rotate = False
        if area > 5000:
            x, y, w, h = cv2.boundingRect(contours[i])
            if w > h:
                if w / h < 1 or w / h > 2:
                    break
                else:
                    print(w, h)
            else:
                if h / w < 1 or h / w > 2:
                    break
                else:
                    print(w, h)
                    rotate = True
            image = img[y - 10:y + h + 10, x - 10:x + w + 10]
            print(image.shape)
            if rotate:
                image = utils.rotate_image(image, 90)
            image = cv2.resize(image, (856, 540), interpolation=cv2.INTER_CUBIC)
            print(image.shape)
            if len(image.shape) == 3:
                image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            else:
                image_grey = image

            front_face_detected = frontFaceClassifier.detectMultiScale(image_grey, scaleFactor=1.2,
                                                                       minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE)

            if len(front_face_detected) != 0:
                for x, y, w, h in front_face_detected:
                    if x < 856 / 2:
                        image = utils.rotate_image(image, 180)
                    # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 255), 2)
            utils.show(image, 1)
            name = img_path.split("\\")[1]
            # save the cropped ID-card region as a separate file
            cv2.imencode('.jpg', image)[1].tofile(
                "E:/KingT/staff/test/all身份证/" + name + "_" + str(i) + img_path.split("\\")[2])
Example #6
 def get(self, *args, **kwargs):
     try:
         app_os = self.get_argument('os', '')
         icon = self.get_argument('icon', '')
         subscript = self.get_argument('subscript', '')
         icon_set = self.app_icon_set.get(app_os)
         if not icon_set or not icon:
             self.render_obj(dict(code=400, msg='arguments error'))
             return
         img_icon, fp_icon, _ = utils.get_upload_image_file(
             self.upload_path, 'icon', icon)
         img_sub, fp_sub = None, ''
         if not img_icon:
             self.render_obj(dict(code=404, msg='image file not found'))
             return
         if subscript:
             img_sub, fp_sub, _ = utils.get_upload_image_file(
                 self.upload_path, 'subscript', subscript)
             if not img_sub:
                 self.render_obj(dict(code=404, msg='image file not found'))
                 return
         new_img = utils.img_composite(img_icon,
                                       img_sub) if img_sub else img_icon
         fp = '%s-%s' % (fp_icon, fp_sub) if fp_sub else fp_icon
         app_os_path = os.path.join(self.media_path, 'icons', app_os)
         new_img_path = os.path.join(app_os_path, fp)
         for d in icon_set:
             size = d.get('size', '')
             filename = d.get('filename', '')
             if not filename or not size:
                 continue
             d_img = utils.img_resize(new_img, size)
             if not d_img:
                 continue
             utils.img_save(d_img, new_img_path, filename)
         # generate a zip file containing all of the icons
         zf_name = os.path.join(app_os_path, '%s.zip' % fp)
         zf = zipfile.ZipFile(zf_name, 'w', zipfile.ZIP_DEFLATED)
         for fn in os.listdir(new_img_path):
             zf.write(os.path.join(new_img_path, fn), os.path.join(fp, fn))
         zf.close()
         self.render_obj(dict(code=200, msg='success', fp=fp))
     except Exception:
         traceback.print_exc()
         self.render_obj(dict(code=500, msg='system error'))
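utils.img_resize(new_img, size) and utils.img_save(d_img, new_img_path, filename) suggest Pillow images here, with one target size per entry of icon_set. A minimal sketch of such helpers, assuming Pillow and a size given as a 'WIDTHxHEIGHT' string; both assumptions go beyond what the excerpt shows:

import os
from PIL import Image

def img_resize(img, size):
    """Assumed behaviour: resize a Pillow image to a 'WIDTHxHEIGHT' spec, or return None."""
    try:
        w, h = (int(v) for v in str(size).lower().split('x'))
    except ValueError:
        return None                      # the handler above skips entries it cannot resize
    return img.resize((w, h), Image.LANCZOS)

def img_save(img, dir_path, filename):
    """Assumed behaviour: create the target directory if needed, then save the image."""
    os.makedirs(dir_path, exist_ok=True)
    img.save(os.path.join(dir_path, filename))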
Example #7
def devide_jpgs(jpg_dir, norm_img_size=True):
    '''If the image is colour, save it as c.jpg; otherwise convert to greyscale and save it as g.jpg.'''
    out_dir = utils.make_outdir(jpg_dir, NEW_JPG_DIR)
    for jpg_path in utils.get_path_list(jpg_dir, 'jpg'):
        img = cv2.imread(jpg_path)
        if img is None:
            continue

        # resize if norm_img_size is set
        if norm_img_size:
            img = utils.img_resize(img, max_height=config.MAX_HEIGHT)

        if is_color_img(img):
            cv2.imwrite(
                join(out_dir,
                     basename(jpg_path).replace('.jpg', 'c.jpg')), img)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(
                join(out_dir,
                     basename(jpg_path).replace('.jpg', 'g.jpg')), img)
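is_color_img is not part of the excerpt; a minimal sketch, assuming a frame counts as colour when its BGR channels differ noticeably from one another (the tolerance is an arbitrary assumption):

import cv2
import numpy as np

def is_color_img(img, tolerance=10):
    """Assumed behaviour: True if the channels differ enough for the image to count as colour."""
    if img.ndim < 3 or img.shape[2] == 1:
        return False
    b, g, r = cv2.split(img)
    diff = max(np.abs(b.astype(int) - g.astype(int)).mean(),
               np.abs(g.astype(int) - r.astype(int)).mean())
    return diff > tolerance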
Example #8
    frame = utils.rescale_frame(frame, percent = 50)
    gray            = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces           = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)

    for (x, y, w, h) in faces:
        roi_gray    = gray[y:y+h, x:x+w]   # face region in the greyscale frame
        roi_color   = frame[y:y+h, x:x+w]  # face region in the colour frame
        #cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 3)

        eyes = eyes_cascade.detectMultiScale(roi_gray, scaleFactor=1.5, minNeighbors=5)
        for (ex, ey, ew, eh) in eyes:
            #cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 3)
            roi_eyes = roi_gray[ey: ey + eh, ex: ex + ew]
            glasses2 = utils.img_resize(glasses.copy(), width=ew)

            gw, gh, gc = glasses2.shape  # numpy shape order is (rows, cols, channels)
            for i in range(0, gw):
                for j in range(0, gh):
                    #print(glasses[i, j]) #RGBA
                    if glasses2[i, j][3] != 0:  # copy only non-transparent pixels
                        roi_color[ey + i, ex + j] = glasses2[i, j]


        nose = nose_cascade.detectMultiScale(roi_gray, scaleFactor=1.5, minNeighbors=5)
        for (nx, ny, nw, nh) in nose:
            #cv2.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh), (255, 0, 0), 3)
            roi_nose = roi_gray[ny: ny + nh, nx: nx + nw]
            mustache2 = utils.img_resize(mustache.copy(), width=nw)
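The per-pixel double loop that copies non-transparent glasses pixels works, but it is slow in Python. The same overlay can be done in one vectorised step with a boolean alpha mask; this is a sketch of an alternative, not the original code:

import numpy as np

def overlay_rgba(dst, overlay, x, y):
    """Copy the non-transparent pixels of a BGRA overlay onto dst at offset (x, y).

    Both arrays must be 4-channel and the overlay must fit inside dst at that offset.
    """
    h, w = overlay.shape[:2]
    roi = dst[y:y + h, x:x + w]          # view into dst, so the write happens in place
    mask = overlay[:, :, 3] != 0         # alpha == 0 means fully transparent
    roi[mask] = overlay[mask]
    return dst

With this helper the inner loops collapse to overlay_rgba(roi_color, glasses2, ex, ey).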
Example #9
    for (x, y, w, h) in faces:
        # Find face roi
        roi_gray = gray[y:y + h, x:x + w]
        roi_colour = frame[y:y + h, x:x + w]
        # cv2.rectangle(frame, (x, y), (x+w, y+h), (255,0,0), 2)

        # Find eye roi
        eyes = eye_cascade.detectMultiScale(gray,
                                            scaleFactor=1.5,
                                            minNeighbors=5)
        for (ex, ey, ew, eh) in eyes:
            eroi_gray = roi_gray[ey:ey + eh, ex:ex + ew]
            eroi_colour = roi_colour[ey:ey + eh, ex:ex + ew]

            # Resize glasses
            rz_glasses = utils.img_resize(glasses.copy(), width=ew)

            # Grab shape of glasses
            gw, gh, gc = rz_glasses.shape

            # Replace face pixels to glasses pixels
            for i in range(0, gw):
                for j in range(0, gh):
                    if rz_glasses[i, j][3] != 0:  # alpha = 0 is transparent
                        frame[i + ey + 8, j + ex] = rz_glasses[i, j]

            # cv2.rectangle(frame, (ex, ey), (ex+ew, ey+eh), (255,0,0), 2)

        noses = nose_cascade.detectMultiScale(gray,
                                              scaleFactor=1.5,
                                              minNeighbors=5)
Example #10
'''------------------------start of the program-----------------------------'''

dataNum = 1000
batchSize = 16

with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.8)))) as sess:

    '''------------------------Data Load-------------------------'''
    ref, hr = dataload.dataLoader("./SRNTT1000.h5")
    M_t = np.load("autumn1000_M_t.npy")
    M_s = np.load("autumn1000_M_s.npy")
    train_hr, test_hr, train_ref, test_ref, train_Mt, test_Mt, train_Ms, test_Ms \
     = train_test_split(hr, ref, M_t, M_s, test_size=0.2)
    
    train_lr = utils.img_resize(train_hr, 25)
    train_lref = utils.img_resize(train_ref, 25)
    test_lr = utils.img_resize(test_hr, 25)
    test_lred = utils.img_resize(test_ref, 25)

    for i in range(test_ref.shape[0]):
        path = './result/ref/'+ str(i+1) + '.bmp'
        utils.img_save(test_ref[i,:,:,:], path)
        path = './result/lr/' + str(i+1) + '.bmp'
        utils.img_save(test_lr[i,:,:,:], path)
        path = './result/hr/' + str(i+1) + '.bmp'
        utils.img_save(test_hr[i,:,:,:], path)
    

    '''----------------------Net Construct-------------------------'''
    x = tf.placeholder(tf.float32, [None, 40, 40, 3])
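In this script img_resize is applied to whole batches of images with a percentage argument: 25 turns the HR patches into the 40x40 LR patches fed to the 40x40x3 placeholder, and in example #11 the same helper is called with 400 for a 4x upscale. A minimal sketch of such a batch helper, assuming the second argument is a percentage and the batch is an N x H x W x C array with 3 channels:

import cv2
import numpy as np

def img_resize(batch, percent):
    """Assumed behaviour: resize every image of an NxHxWx3 batch to percent of its size."""
    scale = percent / 100.0
    n, h, w, c = batch.shape
    new_h, new_w = int(h * scale), int(w * scale)
    out = np.empty((n, new_h, new_w, c), dtype=batch.dtype)
    for i in range(n):
        # cv2.resize expects (width, height); 3-channel images keep their channel axis
        out[i] = cv2.resize(batch[i], (new_w, new_h),
                            interpolation=cv2.INTER_CUBIC)
    return out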
Example #11
import utils
import dataload
import tensorflow as tf
import numpy as np
import Vgg_module
import patch_match

with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(
        per_process_gpu_memory_fraction=0.8)))) as sess:

    train_ref, train_hr = dataload.dataLoader("./SRNTT1000.h5", 1000)
    train_lr = utils.img_resize(train_hr, 25)
    train_lref = utils.img_resize(train_ref, 25)

    M_t = np.zeros([1000, 40, 40, 256])
    M_s = np.zeros([1000, 40, 40, 256])
    for i in range(10):
        print("-----------------------Round", i, "----------------------")
        ran = list(range(i * 100, (i + 1) * 100))
        M_LR = Vgg_module.vgg19_module(
            utils.img_resize(train_lr[ran, :, :, :], 400), sess)
        M_LRef = Vgg_module.vgg19_module(
            utils.img_resize(train_lref[ran, :, :, :], 400), sess)
        M_Ref = Vgg_module.vgg19_module(train_ref[ran, :, :, :], sess)
        M_t[ran, :, :, :], M_s[ran, :, :, :] = patch_match.Fun_patchMatching(
            M_LR, M_LRef, M_Ref, sess)

    np.save("autumn1000_M_t", M_t)
    np.save("autumn1000_M_s", M_s)
Example #12
import numpy as np
import cv2
import utils

cap = cv2.VideoCapture(0)
# width = cap.get(3)
# height = cap.get(4)

img_path = 'images/logo/cfe-coffee.png'
logo = cv2.imread(img_path, -1)
# Resize logo img
watermark = utils.img_resize(logo, height=250)
# Grayscale watermark
# watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2GRAY)
# Change watermark to 4channel
watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)
water_h, water_w, water_c = watermark.shape
cv2.imshow('watermark', watermark)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame = utils.rescale_frame(frame, percent=50)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    cv2.rectangle(frame, (50, 150), (75, 175), (255, 0, 0), 2)
    # Detect faces by using the cascades
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Find a certain area in the frame
    start_x = 0
    start_y = 0
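utils.rescale_frame(frame, percent=50), used here and in example #8, is another helper outside the excerpts; a minimal sketch, assuming a plain percentage rescale of a single frame:

import cv2

def rescale_frame(frame, percent=50):
    """Assumed behaviour: resize one frame to the given percentage of its original size."""
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)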