Example #1
def solve_captcha(image_file, model_data):
    #image = cv2.imread(image_file)
    (model, graph, lb) = model_data
    letter_images = preprocess_image(image_file)

    if not letter_images:
        return ""

    # create the output image and the list of predicted characters
    #output = cv2.merge([image]*3)
    predictions = []

    for letter_image in letter_images:
        letter_image = resize_image(letter_image, 20, 20)

        letter_image = np.expand_dims(letter_image, axis=2)
        letter_image = np.expand_dims(letter_image, axis=0)
        with graph.as_default():
            # ask the neural network for a prediction
            prediction = model.predict(letter_image)
        # convert the one-hot prediction back into a letter
        letter = lb.inverse_transform(prediction)[0]
        predictions.append(letter)

    captcha_text = "".join(predictions)
    #cv2.imshow("OUT",output)
    #cv2.waitKey()
    return captcha_text
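
The resize_image(letter_image, 20, 20) helper is not shown in this excerpt. A minimal sketch of the fit-and-pad behavior the CAPTCHA examples on this page appear to assume (scale the longer side to the target, then pad to the exact box; the replicate-border padding is an assumption):

import cv2

def resize_image(image, width, height):
    # hypothetical fit-and-pad helper: scale the longer side to the target,
    # then pad with replicated border pixels to exactly width x height
    h, w = image.shape[:2]
    if w > h:
        scaled = cv2.resize(image, (width, max(1, int(h * width / w))))
    else:
        scaled = cv2.resize(image, (max(1, int(w * height / h)), height))
    pad_w = width - scaled.shape[1]
    pad_h = height - scaled.shape[0]
    return cv2.copyMakeBorder(scaled, pad_h // 2, pad_h - pad_h // 2,
                              pad_w // 2, pad_w - pad_w // 2,
                              cv2.BORDER_REPLICATE)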
Example #2
def face_predict(self, image):
    # Determine the dimension ordering from the backend, as during training
    if K.image_data_format() == 'channels_first' and image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
        image = resize_image(image)  # the size must match the training set: IMAGE_SIZE x IMAGE_SIZE
        image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))  # unlike training, we predict on a single image
    elif K.image_data_format() == 'channels_last' and image.shape != (1, IMAGE_SIZE, IMAGE_SIZE, 3):
        image = resize_image(image)
        image = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))

    # Convert to float and normalize
    image = image.astype('float32')
    image /= 255
    # Probability of the input belonging to each class; for our binary case
    # this gives the probabilities of class 0 and class 1
    result = self.model.predict_proba(image)
    print('result:', result)
    # Class prediction: 0 or 1
    result = self.model.predict_classes(image)
    # Return the predicted class
    return result[0]
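
Note: predict_proba and predict_classes were removed from tf.keras Sequential models in TensorFlow 2.6. A sketch of the equivalent on newer stacks, given a trained Keras model `model`, a preprocessed batch `image` as above, and a 2-way softmax output (assumptions):

import numpy as np

probabilities = model.predict(image)                         # shape (1, 2): P(class 0), P(class 1)
predicted_class = int(np.argmax(probabilities, axis=-1)[0])  # class prediction: 0 or 1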
Example #3
def test_1(datafiles):
    """Test of image resize."""
    for image_path in datafiles.listdir():
        image = Image.open(str(image_path))
        new_full_path = resize_image(image, output_path, 100, 100)
        expected_full_path = os.path.join(
            output_path,
            os.path.basename(str(image_path)).replace('.jpg', '_100x100.jpg'))
        assert new_full_path == expected_full_path
        assert os.path.exists(expected_full_path)
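
For reference, a hypothetical sketch of the resize_image variant this test implies (the signature, the _100x100 filename suffix, and the save-to-output_path behavior are all inferred from the assertions):

import os
from PIL import Image

def resize_image(image, output_path, width, height):
    # resize a PIL image and save it under output_path with a _WxH suffix
    resized = image.resize((width, height))
    name, ext = os.path.splitext(os.path.basename(image.filename))
    new_full_path = os.path.join(output_path, '{}_{}x{}{}'.format(name, width, height, ext))
    resized.save(new_full_path)
    return new_full_path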
Example #4
import os
import urllib.request

from gazpacho import get, Soup  # assumed source of the get/Soup helpers
import resize

# folder_download and folder_save are assumed to be defined earlier in the original script

urls = ["https://www.juniqe.de/a-hero-premium-poster-portrait-2674808.html?_sp=c5abf008-92e6-4178-933b-0b317361bccd.1571860545825",
        "https://www.juniqe.de/wild-world-premium-poster-portrait-2841204.html?_sp=c5abf008-92e6-4178-933b-0b317361bccd.1571860722313"]

for url in urls:
    print("Fetch URL:", url)
    html = get(url)
    soup = Soup(html)

    result = soup.find("div", {"class": "filter-by-design-header__image-wrap"})
    result = result.find("img")

    path_to_image_sm = result.attrs["src"]
    path_to_image_big = path_to_image_sm.replace("x386", "x1200")  # request the larger rendition

    filename = path_to_image_big.rsplit("/")[-1]
    path = os.path.join(folder_download, filename)
    print(filename, "->", path)

    print("Save Image:", filename)
    urllib.request.urlretrieve(path_to_image_big, path)

    filename_in = path
    filename_out = os.path.join(folder_save, filename)
    new_width = 3000

    print("Resize Image:", filename)
    resize.resize_image(filename_in, filename_out, new_width)

print("done")
Example #5
import os

import cv2
import numpy as np
from imutils import paths  # assumed: list_images comes from imutils

OUTPUT_FOLDER = "output_char"
MODEL_FILENAME = "model/captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model/model_labels.dat"

data = []
labels = []

for image_file in paths.list_images(OUTPUT_FOLDER):
    # load the image and convert it to grayscale
    image = cv2.imread(image_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # resize the letter to 20x20 pixels
    image = resize_image(image, 20, 20)

    # add a third channel dimension so Keras accepts the array
    image = np.expand_dims(image, axis=2)

    # the letter label is the name of the containing folder
    label = image_file.split(os.path.sep)[-2]

    # add the letter image and its label to the training data
    data.append(image)
    labels.append(label)

data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
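
From here the labels are typically one-hot encoded before training. A sketch using scikit-learn's LabelBinarizer (an assumption, since the training step is not shown); the fitted binarizer is what Example #1 later uses for inverse_transform:

from sklearn.preprocessing import LabelBinarizer

# one-hot encode the letter labels for the network's softmax output
lb = LabelBinarizer().fit(labels)
labels = lb.transform(labels)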
Example #6
"""Original file is located at
    https://colab.research.google.com/drive/13J2npGWlfMIHhhtIb-emuDB5EI_HlFg4
"""

from skimage.measure import compare_ssim as ssim
import numpy as np
import cv2
from IPython.display import Image

# In Colab: !pip install resize

from resize import resize_image

img = cv2.imread("img.jpg")
img2 = cv2.imread("img2.jpg")

img, img2 = resize_image(img, img2)
a = img.shape
b = img2.shape

grayA = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

(score, diff) = ssim(grayA, grayB, full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))

Example #7
from skimage.measure import compare_ssim
import cv2
from datetime import datetime
import numpy as np
from resize import resize_image
import sys

img1 = sys.argv[1]
img2 = sys.argv[2]

before = cv2.imread(img1)
after = cv2.imread(img2)

before, after = resize_image(before, after)

# Convert images to grayscale
before_gray = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY)
after_gray = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY)

# Compute SSIM between two images
(score, diff) = compare_ssim(before_gray, after_gray, full=True)
# print("Image similarity", score)

# The diff image contains the actual image differences between the two images
# and is represented as a floating point data type in the range [0,1]
# so we must convert the array to 8-bit unsigned integers in the range
# [0,255] before we can use it with OpenCV
diff = (diff * 255).astype("uint8")

# Threshold the difference image, followed by finding contours to
# obtain the regions of the two input images that differ
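
The excerpt ends before the thresholding step. A sketch of how the comment above is commonly completed with OpenCV (variable names follow the snippet; the bounding-box drawing is an assumption):

# threshold the diff image, then find contours of the changed regions
thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]

# draw a bounding box around each region that differs
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(after, (x, y), (x + w, y + h), (36, 255, 12), 2)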
Example #8
def main(resize_max_length_c, top_x_features_c, top_x_likeness_c, top_x_limit_c, face_scale_c, face_neighbors_c, face_min_c, face_resize_c, blur_threshold_c):
    """ This program takes a directory path of images and processes them for cull flags
    ********************************
    1. Take directory path for Smart Previews. DONE
    2. Recursively parse directory into List. DONE
    3. Convert DNG to JPG. DONE
    4. Resize images smaller. DONE
    5. Process list of images for Duplicates. 
    6. Process list of images for Faces. DONE
    7. Process list of images for Blurs. DONE
    8. Process list of images for Blinks. DONE
    9. Process list of images for Distortions (later feature).
    10. Convert list to CSV for interpretation by other programs. DONE

      Typical usage example:

      Python3 app.py $path_to_smart_preview_directory 
      python3 app.py /Applications/XAMPP/xamppfiles/htdocs/AI-Cull/images/dngs-structured-small

    Will store intermediary steps and performance in the output folder 
    ********************************
    """

    # Constants for CSV header lookup so we don't have to use indexes; sorted in CSV column order
    CONST_KEY = 'Key'  # 0
    CONST_FILE = 'File'  # 1
    CONST_LOCATION = 'Location'  # 2
    CONST_JPG = 'JPG'  # 3
    CONST_JPG_LOCATION = 'JPG Location'  # 4
    CONST_SCALED_JPG = 'Scaled JPG'  # 5
    CONST_FEATURES = 'Features'  # 6
    CONST_VECTOR = 'Vector'  # 7
    CONST_NEIGHBORS = 'Neighbors'  # 8
    CONST_FACES = 'Faces'  # 9
    CONST_FOCUS = 'Focus'  # 10
    CONST_BLURRY = 'Blurry'  # 11
    CONST_BLINKS = 'Blinks'  # 12

    # Instance variables
    args = sys.argv  # take in args from system
    arg_len = len(args)  # check how many args came in
    run_at = time_util.timestamp_simple()
    csv_title = 'Results_' + run_at  # title of csv file being output
    csv_performance = "All_Performance.csv"  # title of performance csv
    csv_columns = []  # initialize column headers for csv
    list_data = []  # initialize list data as empty
    progress = 1  # initialize progress to 1st item
    queue = 0  # initialize size of queue to 0
    root_dir = os.getcwd()  # initialize root directory location
    verbose = 0

    # clean Up Old Run Before Starting New One
    # clean_up.folder('converted_jpgs')
    # clean_up.folder('faces/individuals')
    # clean_up.folder('faces/overlays')
    # clean_up.folder('image_vectors')
    # clean_up.folder('nearest_neighbors')
    # clean_up.folder('scaled_bw_jpgs')
    # print(time_util.timestamp() + '[INFO] Clean Up Done') # Print path given

    # Performance Metrics
    # this is the long edge size of our resized images
    resize_max_length = resize_max_length_c
    top_x_features = top_x_features_c
    top_x_likeness = top_x_likeness_c
    top_x_limit = top_x_limit_c
    face_scale = face_scale_c
    face_neighbors = face_neighbors_c
    face_min = face_min_c
    face_resize = face_resize_c
    face_cascade = 'haarcascade_frontalface_default.xml'  # not implemented yet
    blur_threshold = blur_threshold_c
    eyes_min = 20  # not implemented in blinks
    image_count = 0
    face_found = 0
    eyes_found = 0  # not implemented in blinks
    blinks_found = 0
    blurs_found = 0

    # Total Time Amounts for Each Step
    total_time = 0.0
    queue_time = 0.0
    convert_time = 0.0
    resize_time = 0.0
    feature_time = 0.0
    likeness_time = 0.0
    face_time = 0.0
    blink_time = 0.0
    blur_time = 0.0
    csv_time = 0.0

    # Set arg_path if there are args
    if (len(args) >= 2):
        arg_path = args[1]  # sets arg path for use later

    # arg index 0 is filename, index 1 is the first option. If no args explain correct usage
    if (arg_len != 2):
        # Explain correct usage
        print("\n")
        print(time_util.timestamp(
        ) + '[ERROR] No path given, correct usage: Python3 %s $image_directory' % args[0])
        print(time_util.timestamp(
        ) + '[Example] Python3 app.py /Applications/XAMPP/xamppfiles/htdocs/AI-Cull/images/dngs-structured-small')
        print('*******************************************')
        print('*************** PROGRAM END ***************')
        print('*******************************************')
        quit()  # quit program if being used incorrectly

    track_total_time = time_util.time_tracker()  # start tracking time

    if(verbose == 1):
        # Confirm path given in args
        # Print path given
        print(time_util.timestamp() +
              '[INFO] You gave directory path:' + arg_path)
        print("\n")
        print('*******************************************')

        """ Let user know that we are generating a queue of "n" size based on the contents of folder path given
        ********************************
        INPUT = directory of files
        1. This code block will look through the path given and find all dng files
        OUTPUT = List Data is updated with file and location of the dng files
        PRINTS = Each time item is added to queue for processing
        ********************************
        """
        print("\n")
        print(time_util.timestamp() +
              '[STEP] Generating Queue of files from directory path...')
        print("\n")

    # create the output/run_at folder if it does not exist
    if not os.path.exists('output/'):
        if(verbose == 1):
            print(time_util.timestamp() +
                  "[INFO] Missing output folder, creating it...")
        os.makedirs('output/')

    # create the output/run_at folder if it does not exist
    if not os.path.exists('output/' + run_at):
        if(verbose == 1):
            print(time_util.timestamp() +
                  "[INFO] Missing output/run_at folder, creating it...")
        os.makedirs('output/' + run_at)

    # Add headers for File and Location
    csv_columns.append(CONST_KEY)
    csv_columns.append(CONST_FILE)
    csv_columns.append(CONST_LOCATION)

    track_queue_time = time_util.time_tracker()  # start tracking time
    # Add data for File and Location
    xi = 0
    for root, dirs, files in os.walk(arg_path):
        for _file in files:
            if xi < 10:
                if (fnmatch.fnmatch(_file, '*.dng') or fnmatch.fnmatch(_file, '*.jpg')):
                    xi = xi + 1
                    print(f'{xi} files added to queue')
                    key = _file[:-4]  # is the non extension name
                    add_item = [key, os.path.join(
                        _file), os.path.join(root, _file)]
                    list_data.append(add_item)
                    if(verbose == 1):
                        print(time_util.timestamp() +
                              "[INFO] Added to Queue: " + key)
                    queue = queue + 1

    total_images = queue  # set queue size

    print(time_util.timestamp() + "[INFO] Queue Size: " + str(queue))
    print("\n")
    print(time_util.timestamp(
    ) + "[INFO] Total Time to generate queue: " + str(track_queue_time.total_time()))

    # how long did it take to generate queue
    queue_time = track_queue_time.total_time()

    if(verbose == 1):
        print("\n")
        print('*******************************************')
        """ Convert DNG to JPG
        ********************************
        INPUT = queue of files in list data
        1. This code block will look through the queue in list data and convert dngs to jpg
        OUTPUT = JPG files from the DNGs
        OUTPUT = List Data is updated with file and location of the jpg files
        PRINTS = Each time item is converted to jpg
        ********************************
        """
        print("\n")
        print(time_util.timestamp() +
              '[STEP] Converting DNGs to JPG from queue...')
        print("\n")

    progress = progress  # no-op; kept only to match the pattern of the other steps

    # Add header for jpg file and location
    csv_columns.append(CONST_JPG)
    csv_columns.append(CONST_JPG_LOCATION)

    track_convert_time = time_util.time_tracker()  # start tracking time

    # create the output/ folder if it does not exist
    if not os.path.exists('output/converted_jpgs'):
        #print(time_util.timestamp() + "[INFO] Missing output/converted_jpgs folder, creating it...")
        os.makedirs('output/converted_jpgs')

    # create the output/run_at folder if it does not exist
    if not os.path.exists('output/' + run_at + '/converted_jpgs'):
        #print(time_util.timestamp() + "[INFO] Missing output/run_at/converted_jpgs folder, creating it...")
        os.makedirs('output/' + run_at + '/converted_jpgs')

    # Convert all DNGs to JPG
    for row in list_data:
        name = row[csv_columns.index(CONST_KEY)]
        dng_to_jpg.convert(
            run_at, row[csv_columns.index(CONST_LOCATION)], name)  # dng path
        if(verbose == 1):
            print(time_util.timestamp(
            ) + "[INFO] DNG to JPG (" + str(progress) + " of " + str(queue) + "): " + name)
        progress = progress + 1

        # Add New Data
        row.append(name + '.jpg')
        # Convert DNG every time row.append("output/"+ str(run_at) + "/converted_jpgs/" + name + ".jpg")
        # Convert DNG once
        row.append("output/converted_jpgs/" + name + ".jpg")

    print("\n")
    print(time_util.timestamp(
    ) + "[INFO] Total Time to Convert DNG to JPG: " + str(track_convert_time.total_time()))
    # how long did it take to convert to JPG
    convert_time = track_convert_time.total_time()
    print("\n")

    if(verbose == 1):
        print('*******************************************')
        """ Scale down JPG and Convert to Grayscale
        ********************************
        INPUT = queue of files in list data
        1. This code block will look through the queue in list data and resize down jpgs
        OUTPUT = Scaled / BW JPG files
        OUTPUT = List Data is updated with file and location of the jpg files
        PRINTS = Each time item is converted to jpg
        ********************************
        """
        print("\n")
        print(time_util.timestamp() + '[STEP] Converting JPG to ' + str(
            resize_max_length) + 'px long edge and grayscale from queue...')
        print("\n")

    progress = 1  # reset progress for this step

    # Add header for scale bw jpgs
    csv_columns.append(CONST_SCALED_JPG)

    track_resize_time = time_util.time_tracker()  # start tracking time

    # Black white and resize all JPG
    for row in list_data:
        name = row[csv_columns.index(CONST_KEY)]  # name key
        resize.resize_image(run_at, row[csv_columns.index(
            CONST_JPG_LOCATION)], name, resize_max_length)  # jpg path
        if(verbose == 1):
            print(time_util.timestamp(
            ) + "[INFO] BW Resize Complete on (" + str(progress) + " of " + str(queue) + "): " + name)
        progress = progress + 1

        # Add New Data
        # if scaling every time
        row.append("output/" + str(run_at) +
                   "/scaled_bw_jpgs/" + name + "_bw.jpg")
        # if scaling just one time
        # row.append("output/scaled_bw_jpgs/" + name + "_bw.jpg")

    print("\n")
    print(time_util.timestamp() +
          "[INFO] Total Time to Scale JPG: " + str(track_resize_time.total_time()))
    resize_time = track_resize_time.total_time()  # how long did it take to resize
    print("\n")

    if(verbose == 1):
        print('*******************************************')
        """ Run Feature Detection to Vectorize Images for Comparison
        ********************************
        INPUT = directory of files
        1. This code block will look through the queue in list data and find features (things)
        OUTPUT = image vectors based on feature detection
        OUTPUT = List Data is updated with feature confidence
        PRINTS = Each time feature is found
        ********************************
        """
        print("\n")
        print(time_util.timestamp() +
              '[STEP] Running Feature detection on scaled JPGs...')
        print("\n")

    progress = 1  # reset progress for this step

    # Add headers for duplicates data
    # csv_columns.append(CONST_FEATURES)
    csv_columns.append(CONST_VECTOR)

    track_feature_time = time_util.time_tracker()  # start tracking time

    # Run whole directory at once
    # subprocess.run(["python", "classify_images.py", arg_path])
    list_data = classify_images.classify_image(run_at, list_data, csv_columns)

    print("\n")
    print(time_util.timestamp(
    ) + "[INFO] Total Time to Feature Detect: " + str(track_feature_time.total_time()))
    # how long did it take to generate features
    feature_time = track_feature_time.total_time()
    print("\n")
    print(list_data)
    if(verbose == 1):
        print('*******************************************')
        """ Run Nearest Neighbor for Likeness Comparison
        ********************************
        INPUT = directory of files
        1. This will check the image_vectors directory and return a JSON list of nearest neighbors
        OUTPUT = json file of nearest neighbor
        OUTPUT = List data of nearest neighbor
        PRINTS = Each json object
        ********************************
        """
        print("\n")
        print(time_util.timestamp() + '[STEP] Running nearest ' + str(
            top_x_likeness) + ' likeness detection on image vectors...')
        print("\n")

    progress = 1  # reset progress for this step

    # Add header for duplicates data
    csv_columns.append(CONST_NEIGHBORS)

    track_likeness_time = time_util.time_tracker()  # start tracking time

    # Duplicates detection will find vectorize based on feature detect
    # Images with the same features at similar confidence levels will be believed to be the same
    # Add to list an array of duplicate data

    # Run whole directory at once
    list_data = cluster_vectors.nearest_neighboors(
        run_at, list_data, csv_columns, top_x_likeness, top_x_limit)

    print("\n")
    print(time_util.timestamp(
    ) + "[INFO] Total Time to Likeness Detect: " + str(track_likeness_time.total_time()))
    # how long did it take to generate likeness data
    likeness_time = track_likeness_time.total_time()
    print("\n")

    # """ TEMP DISABLED as not needed for APP 3"""
    # if(verbose == 1):
    #     print('*******************************************')
    #     """ #Find faces in images and save cropped faces
    #     #********************************
    #     #INPUT = directory of files
    #     #1. This code block will look through the queue in list data and find faces
    #     #OUTPUT = face cropped jpgs
    #     #OUTPUT = List Data is updated with face details
    #     #PRINTS = Each time face is found
    #     #********************************
    #     """
    #     print("\n")
    #     print(time_util.timestamp() + '[STEP] Running Face detection on converted full size JPGs...')
    #     print("\n")

    # progress = 1 # reset progress for this step

    # # Add header for faces data
    # csv_columns.append(CONST_FACES)

    # track_face_time = time_util.time_tracker() # start tracking time

    # # create the output/run_at folder if it does not exist
    # if not os.path.exists('output/' + run_at + '/faces'):
    #     #print(time_util.timestamp() + "[INFO] Missing output/run_at/faces folder, creating it...")
    #     os.makedirs('output/' + run_at + '/faces')

    # # create the output/run_at folder if it does not exist
    # if not os.path.exists('output/' + run_at + '/faces/individuals/'):
    #     #print(time_util.timestamp() + "[INFO] Missing output/run_at/faces/individuals/ folder, creating it...")
    #     os.makedirs('output/' + run_at + '/faces/individuals/')

    # # create the output/run_at folder if it does not exist
    # if not os.path.exists('output/' + run_at + '/faces/overlays/'):
    #     #print(time_util.timestamp() + "[INFO] Missing output/run_at/faces/overlays/ folder, creating it...")
    #     os.makedirs('output/' + run_at + '/faces/overlays/')

    # # Face images will be used for both blur and blink detection
    # # Save all faces to their own JPGs
    # # Add to list an array of face photo locations
    # # TODO add profile face detection
    # # TODO eventually move to a RNN model rather than haarcascade
    # for row in list_data:
    #     name = row[csv_columns.index(CONST_KEY)] # name key
    #     faces = face_detect.get_faces(run_at, row[csv_columns.index(CONST_SCALED_JPG)], name, face_scale, face_neighbors, face_min, face_resize) # jpg path
    #     if(verbose == 1):
    #         print(time_util.timestamp() + "[INFO] Face Detection Complete on (" + str(progress) + " of " + str(queue) +"): " + name)
    #     progress = progress + 1

    #     face_num = len(faces)
    #     # Add New Data
    #     row.append(faces) # faces
    #     face_found = face_found + int(face_num)
    # print(time_util.timestamp() + "[INFO] Total Faces Found: " + str(face_found))

    # print("\n")
    # print(time_util.timestamp() + "[INFO] Total Time to Face Detect: " + str(track_face_time.total_time()))
    # face_time = track_face_time.total_time() # how long did it take to generate likeness data
    # print("\n")

    # """ TEMP DISABLED do not need for APP3 """

    # if(verbose == 1):
    #     print('*******************************************')
    #     """ #Check all faces found if they are blurry
    #     #********************************
    #     #INPUT = directory of files
    #     #1. This code block will look through the queue in list data and determine if faces are blurry
    #     #OUTPUT = focus measure and blur decision
    #     #PRINTS = Each time a photo is reviewed
    #     #********************************
    #     """
    #     print("\n")
    #     print(time_util.timestamp() + '[STEP] Running Blur Detection...')
    #     print("\n")

    # progress = 1 # reset progress for this step

    # # Add header for Blur Check
    # csv_columns.append(CONST_FOCUS)
    # csv_columns.append(CONST_BLURRY)

    # track_blur_time = time_util.time_tracker() # start tracking time

    # # Blur Check Images
    # # TODO also check profile faces
    # # ADD RNN model rather than laplacian only (need to categorize the cropped faces we have)
    # for row in list_data:

    #     blur_check = "image" # faces or image

    #     faces = row[csv_columns.index(CONST_FACES)] # List of faces
    #     # Ex faces
    #     # [
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_1_196x196_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_1_196x196_faces.jpg', '196', '196'],
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_2_194x194_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_2_194x194_faces.jpg', '194', '194'],
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_3_90x90_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_3_90x90_faces.jpg', '90', '90']
    #     #]

    #     focus = 0
    #     blurry = 0
    #     blurry_faces = 0
    #     name = row[csv_columns.index(CONST_KEY)] # name key
    #     original = row[csv_columns.index('Scaled JPG')]

    #     if( len(faces) > 0 and blur_check == "faces"):
    #         for face in faces:
    #             face_result = blur_detect.check_image_for_blur(run_at, name, face[0], original, face[1], blur_threshold) # index 1 is the location of the face file

    #             # Add New Data for Focus and Blur
    #             face.append("%0.2f" % face_result[0]) # focus
    #             face.append(face_result[1]) # blur

    #             focus = focus + int(face_result[0]) # add up all focus results
    #             blurry_faces = blurry_faces + int(face_result[1]) # add up number of blurry faces

    #         focus = focus / len(faces) # average focus measure

    #     else:
    #         # Check the whole image if no faces found or if blur_check is NOT set to faces
    #         image_result = blur_detect.check_image_for_blur(run_at, name, name, original, original, blur_threshold) # index 1 is the location of the face file

    #         focus = focus + int(image_result[0]) # add up all focus results
    #         blurry = blurry + int(image_result[1]) # add up number of blurry faces

    #     # If focus was calculated
    #     if( focus>0 ):
    #         row.append("%0.0f" % focus) # focus average

    #         # If average focus for the image is below threshold then image is blurry, save sample
    #         if( focus < blur_threshold):

    #             # create the output/run_at folder if it does not exist
    #             if not os.path.exists('output/' + run_at + "/blurs/blurry_images/" + str("%0.2f" % focus) + '_' +  name + '.jpg'):
    #                 copyfile(original, 'output/' + run_at + "/blurs/blurry_images/" + str("%0.2f" % focus) + '_' +  name + '.jpg')
    #         else:
    #             # create the output/run_at folder if it does not exist
    #             if not os.path.exists('output/' + run_at + "/blurs/not_blurry_images/" + str("%0.2f" % focus) + '_' +  name  + '.jpg'):
    #                 copyfile(original, 'output/' + run_at + "/blurs/not_blurry_images/" + str("%0.2f" % focus) + '_' + name  + '.jpg')
    #     # If focus was NOT calculated
    #     else:
    #         row.append('') # focus

    #     row.append(blurry) # blur
    #     blurs_found = blurs_found + blurry

    #     if(verbose == 1):
    #         print(time_util.timestamp() + "[INFO] Blur Check Complete on (" + str(progress) + " of " + str(queue) +"): " + name)
    #     progress = progress + 1

    # print("\n")
    # print(time_util.timestamp() + "[INFO] Total Time to Blur Detect: " + str(track_blur_time.total_time()))
    # blur_time = track_blur_time.total_time() # how long did it take to generate likeness data
    # print("\n")

    # """ TEMP DISABLED DO NOT NEED for APP3 """

    # if(verbose == 1):
    #     print('*******************************************')
    #     """ #Check all faces found if they are blinking
    #     #********************************
    #     #INPUT = directory of files
    #     #1. This code block will look through the queue in list data and determine if faces are blinking
    #     #OUTPUT = blink decision decision
    #     #PRINTS = Each time a photo is reviewed
    #     #********************************
    #     """
    #     print("\n")
    #     print(time_util.timestamp() + '[STEP] Running Blink Detection on all faces found...')
    #     print("\n")

    # progress = 1 # reset progress for this step

    # # Add header for Blink Check
    # csv_columns.append(CONST_BLINKS)

    # track_blink_time = time_util.time_tracker() # start tracking time

    # # Run whole directory at once
    # results = blink_detect.check_list_for_blink(run_at, list_data, csv_columns, face_cascade, face_scale, face_neighbors, face_min)

    # eyes_found = results[0]
    # blinks_found = results[1]
    # list_data = results[2]

    # """
    # # Move Blink Detection to BULK
    # # Blink Check Images
    # # TODO also check profile faces
    # """
    # for row in list_data:
    #     faces = row[csv_columns.index(CONST_FACES)] # List of faces
    #     # Ex faces
    #     # [
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_1_196x196_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_1_196x196_faces.jpg', '196', '196'],
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_2_194x194_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_2_194x194_faces.jpg', '194', '194'],
    #     #   ['0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_3_90x90_faces', 'faces/individuals/0AC0C99D-0B40-4331-AC92-CF6BC9C60D34_Face_3_90x90_faces.jpg', '90', '90']
    #     #]

    #     blinks = 0
    #     name = row[csv_columns.index(CONST_KEY)] # name key

    #     if( len(faces) > 0):
    #         for face in faces:
    #             face_result = blink_detect.check_image_for_blink(face[1], face_cascade, face_scale, face_neighbors, face_min) # index 1 is the location of the face file

    #             # Add New Data for confidence and blink
    #             face.append("%0.2f" % face_result[0]) # blink confidence
    #             face.append(face_result[1]) # blink count

    #             blinks = blinks + int(face_result[1])

    #     row.append(blinks) # focus
    #     blinks_found = blinks_found + blinks

    #     print(time_util.timestamp() + "[INFO] Blink Check Complete on (" + str(progress) + " of " + str(queue) +"): " + name)
    #     progress = progress + 1

    # """ TEMP DISABLED NOT NEEDED in APP3 """

    # print("\n")
    # print(time_util.timestamp() + "[INFO] Total Time to Blink Detect: " + str(track_blink_time.total_time()))
    # blink_time = track_blink_time.total_time() # how long did it take to generate likeness data
    # print("\n")

    if(verbose == 1):
        print('*******************************************')
        """ Output data to CSV
        ********************************
        INPUT = list data
        1. This code block will convert list data into csv format
        OUTPUT = csv
        PRINTS = name of csv
        ********************************
        """
        print("\n")
        print(time_util.timestamp() + "[STEP] Generating CSV...")
        print("\n")

    # Convert headers list to string
    listToStr = ' '.join(map(str, csv_columns))
    if(verbose == 1):
        # Display csv headers
        print(time_util.timestamp() + "[INFO] CSV Headers: " + listToStr)

    # Convert list to csv
    track_csv_time = time_util.time_tracker()  # start tracking time
    csv_generator.list_to_csv(csv_title, csv_columns, list_data)
    print(time_util.timestamp() +
          '[INFO] Your CSV File is: ' + csv_title + ".csv")
    print(time_util.timestamp() +
          "[INFO] Total Time to generate csv: " + str(track_csv_time.total_time()))
    # how long did it take to generate likeness data
    csv_time = track_csv_time.total_time()

    print(time_util.timestamp() +
          "[INFO] Total Time to Run AI: " + str(track_total_time.total_time()))
    # how long did it take to generate likeness data
    total_time = track_total_time.total_time()

    # Log Performance
    performance_tracking_headers = ["run_at", "image_count", "resize_max_length", "top_x_features", "top_x_likeness", "top_x_limit", "face_scale", "face_neighbors", "face_min", "face_resize", "face_cascade", "face_found", "blur_threshold",
                                    "eyes_min", "eyes_found", "blinks_found", "blurs_found", "Total Time", "Generate Queue Time", "Convert to JPG Time", "Resize JPG Time", "Feature Time", "Likeness Time", "Face Time", "Blink Time", "Blur Time", "Csv Time"]

    try:
        f = open("output/" + csv_performance)
    except IOError:
        print(time_util.timestamp() +
              '[ERROR] Could not find performance CSV, creating new one...')
        # this will always add headers which will need to be scrubbed in review
        csv_generator.append_to_csv(
            "all_performance.csv", performance_tracking_headers)
        f = open("output/" + csv_performance)
    finally:
        f.close()

    performance_row = [run_at, queue, resize_max_length, top_x_features, top_x_likeness, top_x_limit, face_scale, face_neighbors, face_min, face_resize, face_cascade, face_found,
                       blur_threshold, eyes_min, eyes_found, blinks_found, blurs_found, total_time, queue_time, convert_time, resize_time, feature_time, likeness_time, face_time, blink_time, blur_time, csv_time]
    # this will always add headers which will need to be scrubbed in review
    csv_generator.append_to_csv("all_performance.csv", performance_row)

    # This is the end of the program
    print('*******************************************')
    print('*************** PROGRAM END ***************')
    print('*******************************************')
Example #9
def create_new_user(image_name):
    resize_image(image_name)
    save_dat(image_name)
Example #10
def handle_resize(event, context):
    """Handle an S3 event on the target bucket to resize and save to destination bucket

    Example Event:
    {
      "Records": [
        {
          "eventVersion": "2.0",
          "eventTime": "1970-01-01T00:00:00.000Z",
          "requestParameters": {
            "sourceIPAddress": "127.0.0.1"
          },
          "s3": {
            "configurationId": "testConfigRule",
            "object": {
              "eTag": "0123456789abcdef0123456789abcdef",
              "sequencer": "0A1B2C3D4E5F678901",
              "key": "HappyFace.jpg",
              "size": 1024
            },
            "bucket": {
              "arn": "arn:aws:s3:::mybucket",
              "name": "sourcebucket",
              "ownerIdentity": {
                "principalId": "EXAMPLE"
              }
            },
            "s3SchemaVersion": "1.0"
          },
          "responseElements": {
            "x-amz-id-2": "EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH",
            "x-amz-request-id": "EXAMPLE123456789"
          },
          "awsRegion": "us-east-1",
          "eventName": "ObjectCreated:Put",
          "userIdentity": {
            "principalId": "EXAMPLE"
          },
          "eventSource": "aws:s3"
        }
      ]
    }

    :param event: Information of the event which triggered the invocation of this function
    :type event: dict
    :param context: The invocation context
    :type context: dict
    """
    # Obtain the bucket name and key for the event
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    key_path = event['Records'][0]['s3']['object']['key']

    # Retrieve the S3 Object
    s3_connection = boto3.resource('s3')
    s3_object = s3_connection.Object(bucket_name, key_path)

    response = s3_object.get()

    # Perform the resize operation
    with Image(blob=response['Body'].read()) as image:
        resized_image = resize_image(image, 400, 400)
        resized_data = resized_image.make_blob()

    # Then upload the resized image to the destination bucket
    s3_resized_object = s3_connection.Object('test-resize', key_path)
    s3_resized_object.put(ACL='authenticated-read', Body=resized_data)

    # Finally remove, as the bucket is public and we don't want just anyone dumping the list of our files!
    s3_object.delete()
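
The resize_image call above is not shown in this excerpt. A minimal sketch of a Wand-based helper with that signature, assuming it fits the image inside the box while preserving aspect ratio (the '>' geometry flag only shrinks images larger than the box):

def resize_image(image, width, height):
    # hypothetical helper for the Lambda above (wand.image.Image instance)
    image.transform(resize='{0}x{1}>'.format(width, height))
    return image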
Example #11
import random as rn
import sys

import pygame as pg
from PIL import Image
from tkinter import Tk
from tkinter.filedialog import askopenfilename

# os_command and resize_image come from the project's own modules in the original file

pg.init()

# First, ask the user for the background image
while 1:
    try:
        Tk().withdraw()
        filename = askopenfilename(title='Scegli un\'immagine',
                                   filetypes=[('Images', '*.png *.jpg *.jpeg'),
                                              ('All files', '*.*')])
        file = resize_image(Image.open(filename), 600)
        file = pg.image.frombuffer(file.tobytes(), file.size, file.mode)
        break
    except OSError:
        os_command('cancella lo schermo')  # "clear the screen"
        print('Immagine non riconosciuta, riprova')  # "image not recognized, try again"
    except AttributeError:
        sys.exit()

screen = pg.display.set_mode((600, 600))
pg.display.set_icon(pg.image.load(filename))

# To build the window title, strip all the leading folders,
# then strip the file extension
cap = filename
while cap.find('/') != -1: