def main():
    """Pick the base image with the most best-in-group faces and list the faces to swap in.

    Reads face groups and grading weights, grades every face, keeps the top face
    of each group, then selects the image holding the most winners (breaking ties
    by grading the candidate images themselves).  Writes the selection to
    OUTPUT_DIR/input.json as {"image_index": ..., "faces": [...]}.
    """
    # Get updated faces properties and grade weights.
    groups = extractFacesProperties()
    face_attrs, img_attrs = getUpdatedGradesWeight()

    # Grade every face and keep the best face of each group.
    best_faces = []
    for group in groups:
        for face in group:
            face.set_grade(face_attrs)
        best_faces.append(max(group, key=lambda face: face.grade).face_info)

    # Count how many winning faces each source image contributed.
    best_face_counts = {}
    for best_face in best_faces:
        curr = best_face_counts.get(best_face["image_index"], 0)
        best_face_counts[best_face["image_index"]] = curr + 1
    print(best_face_counts)

    # Get the image(s) with the highest number of good faces.
    # Hoist the max out of the comprehension so it is computed once, not per item.
    top_count = max(best_face_counts.values())
    best_image_index = [k for k, v in best_face_counts.items() if v == top_count]

    if len(best_image_index) > 1:
        # Tie-break: grade the candidate images themselves and keep the best one.
        with open(FGFI_DIR + '/output/data.json', 'r') as f:
            image_data_json = json.load(f)
        best_images = []
        for candidate in best_image_index:
            # BUGFIX: the properties list was previously indexed by the image
            # index instead of the loop position, which broke whenever image
            # indices did not match append order. Use the freshly built
            # properties directly.
            props = set_image_properties(image_data_json["images"][candidate]["path"])
            best_images.append(imageToClass(props, candidate))
        for image_prop in best_images:
            image_prop.set_grade(img_attrs)
        best_image_index = max(best_images, key=operator.attrgetter('grade')).index
    else:
        best_image_index = max(best_face_counts.items(), key=operator.itemgetter(1))[0]

    # Every winning face that does NOT live in the chosen image must be swapped in.
    faces_to_swap = []
    for best_face in best_faces:
        if best_face["image_index"] != best_image_index:
            faces_to_swap.append({
                "group_index": best_face["group_index"],
                "group_face_index": best_face["group_face_index"]
            })

    utils.save_to_json_file(
        os.path.join(OUTPUT_DIR, "input.json"),
        json.dumps({
            "image_index": best_image_index,
            "faces": faces_to_swap
        }))
def detect_properties(images):
    """Detects image properties in the file."""
    # Imported lazily so the module loads even without the Cloud SDK installed.
    from google.cloud import vision

    annotator = vision.ImageAnnotatorClient()
    for pic in images:
        # Read the raw image bytes from disk.
        with io.open(pic, 'rb') as image_file:
            payload = image_file.read()

        # Ask the Vision API for the image's property annotations.
        response = annotator.image_properties(
            image=vision.types.Image(content=payload))

        # Derive the output filename from the path segment after "/input/",
        # dropping the original extension.
        stem = pic.split("/input/")[1].split(".")[0]
        utils.save_to_json_file(
            os.path.join(OUTPUT_DIR, stem + ".json"),
            MessageToJson(response))
def main():
    """Crop faces from the input images, cluster them into groups, and persist the result.

    Writes OUTPUT_DIR/data.json containing the per-image face info and the
    face groups as a list of {"image_index", "face_index"} dicts.
    """
    images = get_images()
    images_info = crop_faces_from_images(images)
    # groups_dict maps "imageIdx_faceIdx" keys to a numeric group index.
    groups_dict = get_cropped_images_groups(images_info)

    # Convert the mapping to a list of lists, one list per group.
    # ROBUSTNESS: size by the largest group index (+1) rather than by the
    # number of distinct indices, so non-contiguous group numbering cannot
    # cause an IndexError below; default=-1 handles an empty mapping.
    num_groups = max(groups_dict.values(), default=-1) + 1
    groups = [[] for _ in range(num_groups)]
    for image_face_index, group_index in groups_dict.items():
        image_index, face_index = image_face_index.split("_")
        groups[group_index].append({
            "image_index": int(image_index),
            "face_index": int(face_index)
        })

    # Save the data to file.
    utils.save_to_json_file(
        os.path.join(OUTPUT_DIR, "data.json"),
        json.dumps({
            "images": images_info,
            "groups": groups
        }))
# exist_ok=True so re-running the script does not crash on an existing directory.
os.makedirs(NOBG_DIR, exist_ok=True)


def removebg(image):
    """Send a PIL image to the remove.bg API and return the result.

    Returns a PIL Image with the background removed, or None when the API
    call fails (the error is printed, best-effort behavior preserved).
    """
    response = requests.post(
        'https://api.remove.bg/v1.0/removebg',
        files={'image_file': utils.image_to_bytes(image)},
        data={'size': 'regular'},
        headers={'X-Api-Key': API_KEY},
    )
    if response.status_code == requests.codes.ok:
        return Image.open(io.BytesIO(response.content))
    else:
        print("Error:", response.status_code, response.text)
        return None


if __name__ == "__main__":
    data = utils.from_json_file(os.path.join(JSON_DIR, "data.json"))
    # Renamed from `input` to avoid shadowing the builtin.
    input_data = utils.from_json_file(os.path.join(DEBUG_DIR, "input.json"))
    ngbg = []
    for face in input_data["faces"]:
        cropped = utils.get_cropped_image(data, face, config.FGFI_DIR)
        # BUGFIX: removebg takes a single argument (the stray `face` argument
        # raised TypeError), and b64encode needs bytes, not a PIL Image —
        # convert via utils.image_to_bytes first.
        nobg_image = removebg(cropped)
        if nobg_image is None:
            # API failure already printed inside removebg; skip this face.
            continue
        ngbg.append({
            "info": face,
            "image_data": base64.b64encode(
                utils.image_to_bytes(nobg_image)).decode("utf-8")
        })
    utils.save_to_json_file(os.path.join(OUTPUT_DIR, "nobg.json"), json.dumps(ngbg))