Example #1
0
def auto_fix(im, noise_removal=False):
    """Auto-crop an image, crop it to the detected people, resize it,
    optionally denoise it, and mark skin regions.

    Returns a tuple of (processed image, detected face boxes).
    """
    # Trim dark borders first, then white borders.
    im = auto_crop_hsv(im)
    im = auto_crop_hsv(im, crop_white=True)

    # Faces smaller than 5% of each image dimension are ignored.
    smallest = (int(im.shape[0] * 0.05), int(im.shape[1] * 0.05))
    faces = face_detect.detect_faces(
        im, min_neighbors=5, min_size=smallest, max_size=None)

    # Discard faces whose top edge sits below 40% of the image height
    # ("below the fold").
    faces = [f for f in faces if f[1] < im.shape[0] * 0.4]

    # Estimate a dress region beneath each remaining face.
    dresses = [dress_box2(f, im.shape[:2]) for f in faces]

    if faces:
        im = crop_to_human(im, faces, dresses)

    # Constrain the (possibly cropped) result to at most 1800x1200.
    im = fit_in(im, 1800, 1200)

    if noise_removal:
        im = cv2.fastNlMeansDenoisingColored(im)

    im = face_detect.skin_detect2(im, marks=True)
    return im, faces
Example #2
0
def auto_fix(im, noise_removal=False):
    """Clean up a photo: crop away borders, crop to detected humans,
    limit the size, optionally denoise, and mark skin areas.

    Returns (processed_image, face_boxes).
    """
    # Two border-crop passes: dark borders, then white borders.
    im = auto_crop_hsv(im)
    im = auto_crop_hsv(im, crop_white=True)

    height, width = im.shape[0], im.shape[1]
    faces = face_detect.detect_faces(
        im,
        min_neighbors=5,
        # Minimum face size is 5% of each dimension.
        min_size=(int(height * 0.05), int(width * 0.05)),
        max_size=None,
    )

    # Keep only faces above the fold (top 40% of the image).
    fold = height * 0.4
    kept = []
    for face in faces:
        if face[1] < fold:
            kept.append(face)
    faces = kept

    # Derive a dress box under every kept face.
    dresses = []
    for face in faces:
        dresses.append(dress_box2(face, im.shape[:2]))

    if len(faces) > 0:
        im = crop_to_human(im, faces, dresses)

    # Cap the output size after all cropping is done.
    im = fit_in(im, 1800, 1200)
    if noise_removal:
        im = cv2.fastNlMeansDenoisingColored(im)
    im = face_detect.skin_detect2(im, marks=True)
    return im, faces
Example #3
0
def detect_text(image_file):
    """OCR a PAN-card image via Google Cloud Vision and extract its fields.

    Parameters:
        image_file: path to the card image on disk.

    Returns a dict with keys ``name``, ``father_name``, ``pan_no``,
    ``date_of_birth`` and ``pan_face`` (a base64 crop of the holder's
    face, or '' when no face-crop file was produced).

    Raises IndexError when the OCR text contains no PAN number, names,
    or date of birth (existing callers rely on this failure mode).
    """
    # Boilerplate strings printed on every PAN card — never candidate names.
    unlike = ['OF INDIA', 'INCOME TAX DEPARTMENT', 'Permanent Account Number',
              's Name', 'INDIA', 'Birth']
    with open(image_file, 'rb') as image:
        base64_image = base64.b64encode(image.read()).decode()
    # SECURITY: the Vision API key is hard-coded in source. Move it to an
    # environment variable / secret store and rotate the exposed key.
    url = 'https://vision.googleapis.com/v1/images:annotate?key=AIzaSyAOztXTencncNtoRENa1E3I0jdgTR7IfL0'
    header = {'Content-Type': 'application/json'}
    body = {
        'requests': [{
            'image': {
                'content': base64_image,
            },
            'features': [{
                'type': 'DOCUMENT_TEXT_DETECTION',
                'maxResults': 100,
            }]
        }]
    }

    response = requests.post(url, headers=header, json=body).json()
    # First annotation holds the full-page text; '' when nothing detected.
    text = response['responses'][0]['textAnnotations'][0]['description'] if len(response['responses'][0]) > 0 else ''
    block = str(text).split('\n')
    # PAN format: 5 letters, 4 digits, 1 alphanumeric check character.
    bca = re.findall(r'\s([a-zA-Z]{5}\d{4}[a-zA-Z0-9]{1})', text)
    # 2-4 space-separated words — candidate person names.
    noun = re.compile('([a-zA-Z]+ [a-zA-Z]+ [a-zA-Z]+|[a-zA-Z]+ [a-zA-Z]+|[a-zA-Z]+ [a-zA-Z]+ [a-zA-Z]+ [a-zA-Z]+)')
    names = noun.findall(text)
    DOB = re.findall(r'\s(\d{2}\/\d{2}\/\d{4})', text)
    names = [x for x in names if x not in unlike]
    # NOTE(review): raises IndexError here when no PAN was matched.
    face = detect_faces(image_file, bca[0])
    image_string = ''
    if os.path.isfile(face):
        with open(face, 'rb') as image:
            image_string = base64.b64encode(image.read()).decode()
        os.remove(face)
    # Dates on the card are day-first (DD/MM/YYYY).
    parsed_birth = str(dateparser.parse(DOB[0], settings={'DATE_ORDER': 'DMY'}).date())
    # Card layout: holder's name appears before the father's name.
    details = {"name": names[0], "father_name": names[1], "pan_no": bca[0],
               "date_of_birth": parsed_birth, "pan_face": image_string}
    return details
Example #4
0
def recognize_image(image, model_path, classify=False):
    """Detect faces in *image*, label each one with the trained model(s),
    and display the annotated result in a window.

    classify=True loads the single classification model; otherwise the
    seven per-identity models are used for prediction.
    """
    rects = detect_faces(image)
    crops = crop_rects(image, rects)

    if classify:
        print('Using 1 model to predict')
        classifier = load_model(model_path + 'classification_model.h5')
        names = classify_faces(crops, classifier)
    else:
        print('Using 7 models to predict')
        ensemble = load_models(model_path)
        names = predict_faces(crops, ensemble)

    show_bounding_boxes_and_labels(image, rects, names, "Recognizing faces")
    def post(self):
        """Handle an uploaded passport image: OCR it and return the
        extracted details plus a base64-encoded crop of the face.

        Reads the 'Passport_Image' file from the multipart request,
        saves it to disk for face detection, and returns the details
        dict augmented with a 'face' key.
        """
        upload = request.files['Passport_Image']
        raw_bytes = upload.read()
        image_string = base64.b64encode(raw_bytes).decode()

        # detect_text here takes the base64 payload directly (unlike the
        # path-based variant elsewhere) — presumably an overload; confirm.
        details = detect_text(image_string)
        print("......................:", details)

        # BUG FIX: the original rebound the builtin `print`
        # (`print = (...)`) and wrote the base64 *text* to document.jpeg,
        # producing a corrupt image. Write the raw uploaded bytes instead.
        print("tnkdfkn:", raw_bytes[:32])
        filename = home + '/' + 'document.jpeg'  # unique-name scheme assumed by caller
        with open(filename, 'wb') as f:
            f.write(raw_bytes)

        face = detect_faces(filename)
        with open(face, 'rb') as face_file:
            image_string_face = base64.b64encode(face_file.read()).decode()

        logger.info("Data added successfully to passport")
        details['face'] = image_string_face
        return details
Example #6
0
    def post(self):
        """Handle a passport/visa scan: decode the uploaded image, OCR it,
        crop the holder's face, and return all details plus size metadata.

        Expects form fields 'Passport_Image' (base64 image) and
        'scan_type' ('mobile' or 'web').

        Returns a dict: {"success": True, "details": {...}} on success,
        or {"success": False, ...} with an error/message on failure.
        """
        try:
            startlog.info("api call hits")
            file = request.form

            base = file['Passport_Image']
            imgdata = base64.b64decode(base)
            header = file['scan_type']
            details = detect_text(base)
            startlog.info(details)

            # Build a unique suffix from the document number when available.
            unique_no = str(datetime.now())
            if details['type'] == 'PASSPORT':
                no = details['data']['Passport_Document_No']
                unique_no = no[5:]
            elif details['type'] == 'VISA':
                no = details['data']['Visa_Number']
                unique_no = no[5:]

            filename = os.path.join(
                home, 'xxxx' + str(unique_no) + 'document.jpeg')
            with open(filename, 'wb') as f:
                f.write(imgdata)

            # Mobile scans need de-rotation before face detection.
            if header == 'mobile':
                crop = rotate(filename, unique_no)
                fullimage_size = ('{:,.0f}'.format(
                    os.path.getsize(filename) / float(1 << 10)) + " KB")
                face = detect_faces(crop, unique_no)
                os.remove(crop)
                os.remove(filename)
            elif header == 'web':
                fullimage_size = ('{:,.0f}'.format(
                    os.path.getsize(filename) / float(1 << 10)) + " KB")
                face = detect_faces(filename, unique_no)
                os.remove(filename)
            else:
                # BUG FIX: an unknown scan_type previously fell through and
                # crashed with NameError on `face`; fail explicitly instead
                # (still lands in the generic Exception handler below).
                raise ValueError("unknown scan_type: %r" % header)

            image_string = ''
            faceimage_size = ''
            if os.path.isfile(face):
                with open(face, 'rb') as image:
                    image_string = base64.b64encode(image.read()).decode()
                faceimage_size = ('{:,.0f}'.format(
                    os.path.getsize(face) / float(1 << 10)) + " KB")
                os.remove(face)

            logger.info("Data added successfully to passport")
            details['face'] = image_string
            details['fullimage_size'] = fullimage_size
            details['faceimage_size'] = faceimage_size
            return ({"success": True, "details": details})
        except OSError as e:
            logger.warning(traceback.format_exc())
            return ({"error": str(e), "success": False})
        except IndexError as e:
            logger.warning(traceback.format_exc())
            return ({"message": str(e), "success": False})
        except Exception as e:
            logger.warning(traceback.format_exc())
            return ({"success": False, "message": str(traceback.format_exc())})
Example #7
0
import time
import threading
import queue

# Wall-clock reference for (unreported) pipeline timing.
start = time.time()

# Result queue for the detection thread; a/b/c are spare queues kept for
# parity with the rest of the project (unused below).
que = queue.Queue()
a = queue.Queue()
b = queue.Queue()
c = queue.Queue()
threads_list = list()

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
image = cv2.imread('images/real_1.jpg')
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


def _detect_into(out_q, cascade, gray):
    # Run Haar-cascade face detection and hand the result back via the queue.
    out_q.put(face_detect.detect_faces(cascade, gray))


# Thread 1: detection (joined immediately, so effectively sequential).
t1 = threading.Thread(target=_detect_into, args=(que, face_cascade, img_gray))
t1.start()
t1.join()

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

align1 = que.get()
detections = detector(align1, 1)
faces = dlib.full_object_detections()
for rect in detections:
    faces.append(predictor(align1, rect))

# Landmarks 36-41 outline the right eye (68-point dlib model); keep the
# first point of the first detected face, converted out of dlib format.
eyes = [[(pt.x, pt.y) for pt in [face.part(i) for i in range(36, 42)]]
        for face in faces]
right_eye = eyes[0][0]
right_eye_x = right_eye[0]
Example #8
0
import cv2
import numpy as np
import face_detect

# Haar cascade for frontal-face detection.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

img1 = cv2.imread("images/real_3.jpg")

# Detection runs on the grayscale version of the image.
gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img = face_detect.detect_faces(face_cascade, gray)

# Show the aligned result until any key is pressed, then clean up.
cv2.imshow('aligned image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()