def process_img(filename, reduceby, tolerance, jitters):
    """Detect faces in a single image under /in and pickle results to /out.

    Args:
        filename: path or bare name of the input image; only the basename
            is used.
        reduceby: downscale factor; the image is resized by 1/reduceby in
            both dimensions before detection.
        tolerance: face-match tolerance, forwarded via make_constants.
        jitters: re-sampling count used when computing face encodings.
    """
    base_name = filename.split('/')[-1]
    source_path = join('/in', base_name)

    digest = file_digest(source_path)
    constants = make_constants(base_name, digest, reduceby, tolerance,
                               jitters)

    people = defaultdict(dict)

    original = cv2.imread(source_path)
    scale = 1.0 / reduceby
    small = cv2.resize(original, (0, 0), fx=scale, fy=scale)

    # Locate faces (upsampling factor 2) and compute their encodings.
    locations = face.face_locations(small, 2)
    encodings = face.face_encodings(small, locations, jitters)

    # Frame number -1 marks a still image rather than a video frame.
    match_to_faces(encodings, locations, people, small, -1, constants)

    # finished processing file for faces, write out pickle
    result_path = join('/out', '{0}.face_detected.pickle'.format(base_name))
    pickle.dump(people, open(result_path, 'wb'))
    print('Wrote output to ' + result_path)
    sys.stdout.flush()
# Exemple #2
# 0
def process_additional_vectors(additional_vectors):
    """Resolve previously uploaded face hashes into face tuples.

    Args:
        additional_vectors: iterable of file-hash strings referring to
            images already saved in the upload directory; may be None or
            empty.

    Returns:
        list of (face_location, face_encoding, picture_hash) tuples, one
        per input hash. Empty list when there is nothing to process.
    """
    if additional_vectors is None or len(additional_vectors) == 0:
        return []
    retval = []

    # BUG FIX: original referenced an undefined name `mypath` here; the
    # directory being listed is the configured upload directory.
    upload_dir = app.config['UPLOAD_DIR']
    onlyfiles = [
        f for f in listdir(upload_dir) if isfile(join(upload_dir, f))
    ]

    # Map each file's hash (its name without extension) to the filename.
    # BUG FIX: original stored every file under the literal key 'name',
    # so lookups below could never succeed.
    hash2name = {}
    for file in onlyfiles:
        name = file.split('.')[0]
        hash2name[name] = file

    for a_vector in additional_vectors:
        filename = hash2name[a_vector]

        # NOTE(review): files are listed from UPLOAD_DIR but read from
        # UPLOAD_FOLDER — presumably the same path; confirm configuration.
        face_image = cv2.imread(
            os.path.join(app.config['UPLOAD_FOLDER'], filename))

        # get location of face so I can return it to gui.
        list_face_locs = face.face_locations(face_image, 2)
        enc = None

        (top, right, bottom, left) = list_face_locs[0]
        face_array = face_image[top:bottom, left:right]

        if app.config['normalize']:
            # normalize (aligned) encoding path
            list_face_encodings = normalize_faces(face_image, list_face_locs,
                                                  2)
            enc = list_face_encodings[0][0]
        else:
            # not normalized: raw dlib encoding path
            enc = face.face_encodings(face_image, list_face_locs)[0]

        loc = list_face_locs[0]

        print('enc_len:', len(enc))
        sys.stdout.flush()

        # make a reference to the vector as a loose hash to the file
        h = pic2hash(face_array)
        temp = (loc, enc, h)
        retval.append(temp)

    # BUG FIX: original built retval but never returned it, so callers
    # (e.g. handle_post_file) always received None.
    return retval
# Exemple #3
# 0
def identify_chips(image, frame_number, reduceby, upsampling):
    """Find face bounding boxes in one video frame.

    The frame is shrunk by 1/reduceby before detection for speed, and the
    detected boxes are scaled back up to original-image coordinates.

    Args:
        image: BGR frame (numpy array).
        frame_number: index of this frame in the source video.
        reduceby: downscale factor used during detection.
        upsampling: dlib upsampling count passed to the detector.

    Returns:
        ((frame_number, scaled_locations), detection_count)
    """
    scale = 1.0 / reduceby
    shrunk = cv2.resize(image, (0, 0), fx=scale, fy=scale)

    # Detect faces with DLib on the reduced frame.
    detections = face.face_locations(shrunk, upsampling)

    # Rescale each (top, right, bottom, left) box back to the coordinate
    # space of the original, full-resolution image.
    rescaled = [
        [int(coord * reduceby) for coord in box] for box in detections
    ]

    return (frame_number, rescaled), len(detections)
# Exemple #4
# 0
def make_tasking(lookupdir, outfilename):
    """Build a watch-list pickle from directories of reference photos.

    Each directory matched by the glob pattern `lookupdir` is treated as
    one identity (keyed by the directory's basename); every image inside
    contributes a face encoding and a cropped, rectangle-annotated face
    picture.

    Args:
        lookupdir: glob pattern matching one directory per identity.
        outfilename: path of the pickle file to write.
    """
    tasking = defaultdict(dict)
    for tdir in glob.glob(lookupdir):
        encodings = []
        pics = []
        print(tdir)
        for qfile in glob.glob(os.path.join(tdir, '*')):
            print(qfile)
            face_image = cv2.imread(qfile)
            locs = face.face_locations(face_image)
            enc = face.face_encodings(face_image, None)
            if enc and len(enc) >= 1:
                print(qfile)
                top, right, bottom, left = locs[0]
                # FIX: reuse the encodings computed above instead of
                # running the (expensive) encoder a second time on the
                # same image.
                encodings.append(enc[0])
                cv2.rectangle(face_image, (left, top), (right, bottom),
                              (0, 255, 0), 2)
                # Crop after drawing, matching the original behavior: the
                # crop may include the rectangle edges just drawn.
                pics.append(face_image[top:bottom, left:right])
        key = tdir.split('/')[-1]
        tasking[key]['face_vec'] = encodings
        tasking[key]['pic'] = pics
    pickle.dump(tasking, open(outfilename, 'wb'))
# Exemple #5
# 0
def handle_post_file(additional_vectors):
    """Persist every uploaded image and extract one face vector per image.

    Each file in the multipart request is saved under an md5-derived name
    (keeping its extension), then the first face in it is located and
    encoded. Results for any previously uploaded hashes passed in
    `additional_vectors` are appended.

    Returns:
        list of (face_location, face_encoding, picture_hash) tuples.
    """
    retval = []

    print('******************************')
    print('request.files:', request.files)
    print('request.files.keys:', list(request.files.keys()))
    sys.stdout.flush()

    for file_key in request.files.keys():
        file = request.files[file_key]

        # Preserve the upload's extension while renaming it.
        extension = file.filename.split('.')[-1]

        #@TODO figure out how make file contents the hash
        stamped = file.filename + '{0}'.format(time.time() * 1000)
        filename_h = hashlib.md5(stamped.encode("utf")).hexdigest()

        file.filename = '{0}.{1}'.format(filename_h, extension)
        print('filename:', file.filename)

        # Save under the hashed name, then re-read the stored image.
        stored_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
        file.save(stored_path)
        face_image = cv2.imread(stored_path)

        # Locate the face so its bounding box can be returned to the GUI.
        list_face_locs = face.face_locations(face_image, 2)

        top, right, bottom, left = list_face_locs[0]
        face_array = face_image[top:bottom, left:right]

        if app.config['normalize']:
            # Aligned/normalized encoding path.
            enc = normalize_faces(face_image, list_face_locs, JITTERS)[0][0]
        else:
            # Raw dlib encoding path.
            enc = face.face_encodings(face_image, list_face_locs)[0]

        loc = list_face_locs[0]

        print('enc_len:', len(enc))
        sys.stdout.flush()

        # Loose hash ties the vector back to the cropped face picture.
        h = pic2hash(face_array)
        retval.append((loc, enc, h))

    return retval + process_additional_vectors(additional_vectors)
def process_vid(filename, reduceby, every, tolerance, jitters):
    """Detect and match faces in a video under /in; pickle results to /out.

    Reads the capture frame by frame, running detection only on every
    `every`-th frame. NOTE(review): a second `def process_vid` later in
    this file re-binds the name, so this definition is shadowed — confirm
    which one is intended.

    Args:
        filename: path or bare name of the input video; basename is used.
        reduceby: downscale factor applied before detection.
        every: process one frame out of this many.
        tolerance: face-match tolerance forwarded to make_constants.
        jitters: re-sampling count used for face encodings.
    """
    list_face_locations = []

    frame_number = 0
    filename = filename.split('/')[-1]
    print('about to process:', filename)
    sys.stdout.flush()
    in_filename = join('/in', filename)

    camera = cv2.VideoCapture(in_filename)
    capture_length = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))

    progress = tqdm(total=capture_length)

    print('there are {0} frame_numbers'.format(capture_length))
    sys.stdout.flush()

    file_hash = file_digest(in_filename)

    constants = make_constants(filename, file_hash, reduceby, tolerance,
                               jitters)

    # Accumulates per-person detection data keyed by identity.
    people = defaultdict(dict)

    keep_going = True
    while keep_going:
        # Inner loop advances `every` frames; only the last one read is
        # actually analyzed below. Any break here signals end-of-capture.
        for _ in range(every):
            # only face detect every once in a while
            progress.update(1)
            progress.set_description('faces:{0} '.format(len(people)))
            progress.refresh()
            frame_number += 1
            # camera.read() returns (success_flag, frame); the flag is
            # reused as the loop's keep_going sentinel.
            keep_going, img = camera.read()
            if img is None:
                print('end of capture:IMG')
                sys.stdout.flush()
                keep_going = False
                break
            if frame_number > capture_length:
                print('end of capture:Length')
                sys.stdout.flush()
                keep_going = False
                break
            if not keep_going:
                print('end of capture:keep_going')
                sys.stdout.flush()
                keep_going = False
                break

        if not keep_going:
            break

        # Shrink the frame before detection for speed.
        resized_image = cv2.resize(img, (0, 0),
                                   fx=1.0 / reduceby,
                                   fy=1.0 / reduceby)

        list_face_locations = face.face_locations(resized_image)
        list_face_encodings = face.face_encodings(resized_image,
                                                  list_face_locations, jitters)

        # Match this frame's faces against known people (mutates `people`).
        match_to_faces(list_face_encodings, list_face_locations, people,
                       resized_image, frame_number, constants)

    # finished processing file for faces write out pickle
    print('processing completed writing outputfile\n')
    out_file = join('/out', '{0}.face_detected.pickle'.format(filename))
    print('writting output to', out_file)
    sys.stdout.flush()
    pickle.dump(people, open(out_file, 'wb'))

    print('done\n')
    sys.stdout.flush()
def process_vid(filename, reduceby, every, tolerance, jitters):
    """Detect and match faces in a video, seeking ahead `every` frames.

    Variant of process_vid that fetches frames by index via
    get_frame_inefficient instead of streaming through cv2.VideoCapture
    (the cv2 calls are left commented out). NOTE(review): this definition
    re-binds and shadows the earlier `process_vid` in this file.

    Args:
        filename: path or bare name of the input video; basename is used.
        reduceby: downscale factor applied before detection.
        every: number of frames to skip between analyzed frames.
        tolerance: face-match tolerance forwarded to make_constants.
        jitters: re-sampling count forwarded to normalize_faces.
    """
    frame_number = 0
    num_detections = 0
    filename = filename.split('/')[-1]
    in_filename = join('/in', filename)

    #camera = cv2.VideoCapture(in_filename)
    #capture_length = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))
    # NOTE(review): length is taken from the basename while other helpers
    # below use in_filename — presumably both resolve the same file; verify.
    capture_length = get_movie_length(filename)

    progress = tqdm(total=capture_length)

    file_hash = file_digest(in_filename)

    constants = make_constants(filename, file_hash, reduceby, tolerance,
                               jitters)

    # Accumulates per-person detection data keyed by identity.
    people = defaultdict(dict)

    keep_going = True
    # `first` ensures frame 0 is processed before any seeking happens.
    first = True

    while keep_going:

        if not first:
            # Stop before seeking past the end of the capture.
            if (every + frame_number) > capture_length:
                keep_going = False
                progress.close()
                break
            frame_number += every
            #camera.set(1, frame_number)
            progress.update(every)
        else:
            first = False

        #keep_going, img = camera.read()
        # Fetch the frame at the current index (re-opens the file each
        # call, hence "inefficient"); returns (success_flag, frame).
        keep_going, img = get_frame_inefficient(filename, frame_number)

        # only face detect every once in a while
        progress.set_description('faces:{0} detections:{1}'.format(
            len(people), num_detections))
        progress.refresh()

        if img is None:
            progress.close()
            print('\nend of capture:IMG')
            keep_going = False
            break
        if frame_number > capture_length:
            progress.close()
            print('\nend of capture:Length')
            keep_going = False
            break
        if not keep_going:
            progress.close()
            print('\nend of capture:keep_going')
            keep_going = False
            break

        if not keep_going:
            progress.close()
            break

        # Shrink the frame before detection for speed.
        resized_image = cv2.resize(img, (0, 0),
                                   fx=1.0 / reduceby,
                                   fy=1.0 / reduceby)

        list_face_locations = face.face_locations(resized_image, 2)
        # Encodings come from the normalized (aligned) pipeline here.
        list_face_encodings = normalize_faces(resized_image,
                                              list_face_locations, jitters)

        num_detections += len(list_face_locations)

        #list_face_encodings = face.face_encodings( resized_image, list_face_locations, jitters)

        # Match this frame's faces against known people (mutates `people`).
        match_to_faces(list_face_encodings, list_face_locations, people,
                       resized_image, frame_number, constants)

    # finished processing file for faces, write out pickle
    out_file = join('/out', '{0}.face_detected.pickle'.format(filename))
    sys.stdout.flush()
    pickle.dump(people, open(out_file, 'wb'))
    print('Wrote output to ' + out_file)
    sys.stdout.flush()