Example 1
def facedetect(width, height, face_percent, inpath, outpath, prefix, f):
    cropper = Cropper(
        width=width,
        height=height,
        face_percent=face_percent,
    )
    print(join(inpath, f))
    infile = join(inpath, f)
    outfile = join(outpath, prefix + f)
    print("in:", infile, "out:", outfile)
    # Get a Numpy array of the cropped image
    try:
        cropped_array = cropper.crop(infile)
    except Exception as e:
        print(e)
        return

    if cropped_array is None:
        logging.info("No face detected. Continuing...")
        resize_and_crop(infile, outfile, (width, height))
        return

    # Save the cropped image with PIL
    try:
        cropped_image = Image.fromarray(cropped_array)
    except Exception as e:
        logging.info("couldn't resize:", e)
        return
    cropped_image.save(outfile)
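The resize_and_crop fallback used above is not shown in this snippet. A minimal sketch of what it might look like, assuming a plain center crop-and-resize with PIL (the behaviour is an assumption, not the original implementation):

from PIL import Image, ImageOps

def resize_and_crop(infile, outfile, size):
    # Hypothetical fallback: center-crop to the target aspect ratio,
    # resize to the requested (width, height), and save the result
    with Image.open(infile) as img:
        ImageOps.fit(img, size).save(outfile)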
Example 2
  def extract_face(self, size=None, face_percent=50, padding=None, fix_gamma=True):
    """
      Uses the face cropper from the 'autocrop' pip package to extract the face
      from the image. See the documentation linked below for more information.

      https://github.com/leblancfg/autocrop

      :param size: Tuple for dimensions of the result.
      :param face_percent: How much of the image should consist of the face.
      :param padding: Space to include beyond the face bounding box.
      :param fix_gamma: Fixes exposure issues resulting from cropping.
      :returns: DGImage containing the extraction result.
    """
    # If no special size, use image's own size
    if size is None:
      size = self.shape()[:2]

    # Convert the image from RGB to BGR, since autocrop expects BGR input
    image = cv2.cvtColor(self.get_image(mode='rgb'), cv2.COLOR_RGB2BGR)

    # Initialize cropper
    cropper = Cropper(
        width=size[1], height=size[0],
        face_percent=face_percent, padding=padding, fix_gamma=fix_gamma)
    cropped = cropper.crop(image)

    # If no face was found, will return None
    if cropped is None:
      return None

    return DGImage.from_image(cropped)
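A possible call site for extract_face, assuming dg_img is an existing DGImage instance (constructing a DGImage is not shown in this snippet):

# Hypothetical usage: crop a 256x256 face occupying roughly 60% of the frame
face_img = dg_img.extract_face(size=(256, 256), face_percent=60)
if face_img is None:
    print("No face detected")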
Example 3
def auto_crop(url, filename):
    cropper = Cropper()

    # crop() returns None when no face is detected
    cropped_array = cropper.crop(url)
    if cropped_array is None:
        return

    cropped_image = Image.fromarray(cropped_array)

    cropped_image.save(filename)
Example 4
def crop_face(img, dim=256):
    # height, width, channels = img.shape
    # dim = min(height, width)

    cropper = Cropper(width=dim, height=dim)
    cropped = cropper.crop(img)
    if cropped is not None:
        cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
    return cropped
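A possible call site for crop_face, assuming the image was loaded with OpenCV (BGR, which is why the snippet above converts the result to RGB before returning it):

import cv2
from PIL import Image

# Hypothetical usage of crop_face defined above
img = cv2.imread('portrait.jpg')
face = crop_face(img, dim=256)
if face is not None:
    Image.fromarray(face).save('face_256.jpg')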
Example 5
File: test.py Project: ceynri/FIC
    def load_tensor(self):
        cropper = Cropper(face_percent=100)
        img_cropped = cropper.crop(self.path)
        img = Image.fromarray(img_cropped)
        loader = transforms.Compose(
            [transforms.Resize(conf.IMAGE_SHAPE),
             transforms.ToTensor()])
        tensor = loader(img).unsqueeze(0)
        self.tensor = tensor
        return tensor
Example 6
    def __init__(self, path):
        self.path = Path(path)
        self.image_files = []
        self.crop = Cropper(face_percent=100)
        self.transform = transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.ToTensor()])

        print("begin loading")
        for pdir in track(list(self.path.glob('*/*.jpg'))):
            # index images by their file path
            self.image_files.append(str(pdir))
Example 7
class dataset(Dataset):
    def __init__(self, path):
        self.path = Path(path)
        self.image_files = []
        self.crop = Cropper(face_percent=100)
        self.transform = transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.ToTensor()])

        print("begin loading")
        for pdir in track(list(self.path.glob('*/*.jpg'))):
            # index images by their file path
            self.image_files.append(str(pdir))
        # self.image_files = self.image_files[:300000]
        #print('finish loading')

    def __len__(self):
        return (len(self.image_files))

    def __getitem__(self, index):
        cropped = self.crop.crop(self.image_files[index])
        if cropped is None:
            return None
        img = Image.fromarray(cropped).convert('RGB')
        img = self.transform(img)
        return img
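Because __getitem__ returns None when no face is detected, this dataset cannot rely on the default DataLoader collation; Example 16 below passes collate_fn=collate, which is not defined in any of these snippets. A minimal sketch for this dataset, assuming the collate function simply drops the None entries and stacks the rest (the variant used with testDataset in Example 16 would also have to handle its (image, label) tuples):

import torch

def collate(batch):
    # Hypothetical collate_fn: discard samples where Cropper.crop() found no face
    batch = [item for item in batch if item is not None]
    if not batch:
        return None
    return torch.stack(batch)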
Example 8
class testDataset():
    def __init__(self, path, class_num=15):
        self.path = Path(path)
        self.image_files = []
        self.crop = Cropper(face_percent=100)
        self.transform = transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.ToTensor()])

        self.data = []
        print("begin loading")
        i = 0
        for name in track(list(self.path.glob('*'))[:class_num]):
            images = list(name.glob('*.jpg'))
            for path in images:
                self.data.append((str(path), i))
            i += 1

    def __len__(self):
        return (len(self.data))

    def __getitem__(self, index):
        cropped = self.crop.crop(self.data[index][0])
        if cropped is None:
            return None, self.data[index][1]
        img = Image.fromarray(cropped).convert('RGB')
        img = self.transform(img)
        return img, self.data[index][1]
Example 9
    def __init__(self, path, class_num=15):
        self.path = Path(path)
        self.image_files = []
        self.crop = Cropper(face_percent=100)
        self.transform = transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.ToTensor()])

        self.data = []
        print("begin loading")
        i = 0
        for name in track(list(self.path.glob('*'))[:class_num]):
            images = list(name.glob('*.jpg'))
            for path in images:
                self.data.append((str(path), i))
            i += 1
Example 10
File: file.py Project: ceynri/FIC
    def load_tensor(self, face_percent: int = 100):
        stream = BytesIO(self.bytes)
        img = Image.open(stream)
        img_array = np.array(img)
        # RGB to BGR for cropper
        img_array = img_array[:, :, [2, 1, 0]]

        cropper = Cropper(face_percent=face_percent)
        img_cropped = cropper.crop(img_array)
        if img_cropped is None:
            raise ValueError('No face recognized!')
        img = Image.fromarray(img_cropped)

        loader = transforms.Compose(
            [transforms.Resize(conf.IMAGE_SHAPE),
             transforms.ToTensor()])
        tensor = loader(img).unsqueeze(0)
        self.tensor = tensor
        return tensor
Example 11
def upload_image():
    global image_path
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'static/uploads/images',
                                 secure_filename(f.filename))
        f.save(file_path)

        cropper = Cropper(width=256, height=256)
        cropped_array = cropper.crop(file_path)
        # Overwrite the upload with the cropped face only if one was detected
        if cropped_array is not None:
            cropped_image = Image.fromarray(cropped_array)
            cropped_image.save(file_path)

        image_path = file_path

        result = 'static/uploads/images/' + secure_filename(f.filename)
        return result
    return None
Example 12
from PIL import Image
from autocrop import Cropper
cropper = Cropper()

for i in range(150):
    try:
        cropped_array = cropper.crop('ms/IU/'+str(i)+'.jpg')
        # print(cropped_array)
        cropped_image = Image.fromarray(cropped_array)
        cropped_image.save('ms/IU_cropped/'+str(i)+'.jpg')
    except AttributeError:
        print(i)
Example 13
from PIL import Image
from autocrop import Cropper

cropper = Cropper()

# Get a Numpy array of the cropped image
cropped_array = cropper.crop('portrait.png')

# Save the cropped image with PIL
cropped_image = Image.fromarray(cropped_array)
cropped_image.save('cropped.png')
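crop() returns None when no face is found, and Image.fromarray(None) then raises AttributeError, which is why several of the other snippets here either check for None or catch AttributeError. A guarded variant of the snippet above:

from PIL import Image
from autocrop import Cropper

cropper = Cropper()

cropped_array = cropper.crop('portrait.png')
if cropped_array is None:
    # No face detected; nothing to save
    print('No face detected in portrait.png')
else:
    Image.fromarray(cropped_array).save('cropped.png')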
Example 14
from PIL import Image
from matplotlib import image
import numpy as np
from numpy.linalg import norm
import cv2
import math
from autocrop import Cropper

cropper = Cropper()


def detect_face(pixels, mtcnn_detector, retina_detector):

    # check which model is used
    check_mtcnn = 1

    # detect faces in the image using mtcnn
    results = mtcnn_detector.detect_faces(pixels)
    # print(results)

    # if MTCNN fails or detects the face with low confidence, fall back to RetinaFace for detection
    if (not results) or results[0]['confidence'] < 0.9:
        check_mtcnn = 0
        print("====== mtcnn not working")
        results = retina_detector.predict(pixels)
        # result_img = detector.draw(pixels,faces)
        # cv2.imshow("result", result_img)
        # cv2.waitKey()
        # cv2.destroyAllWindows()
Example 15
from PIL import Image
from autocrop import Cropper

cropper = Cropper()

cropped_array = cropper.crop('ms_net/ms')

cropped_image = Image.fromarray(cropped_array)
cropped_image.save('ms/cropped.png')
Example 16
    cv2.ocl.setUseOpenCL(False)
    class_num = 15
    dataset = testDataset('/data/chenyangrui/train', class_num=class_num)
    ds = DataLoader(dataset=dataset,
                    num_workers=10,
                    batch_size=40,
                    shuffle=False,
                    drop_last=True,
                    pin_memory=True,
                    collate_fn=collate)

    standard = []
    transform = transforms.Compose(
        [transforms.Resize((256, 256)),
         transforms.ToTensor()])
    Crop = Cropper(face_percent=100)
    path = Path('/data/chenyangrui/train')
    for name in list(path.glob('*'))[:class_num]:
        images = list(name.glob('*.jpg'))
        for img in images:
            cropped = Crop.crop(str(img))
            if cropped is not None:
                img = Image.fromarray(cropped).convert('RGB')
                img = transform(img)
                standard.append(img)
                break

    matrix = np.zeros((class_num, class_num), dtype=np.int32)
    s_features = []
    # net = GAN(train=False).cuda(3)
    # param = torch.load("/data/chenyangrui/resnetGan/resnetGan.pth")
Example 17
from PIL import Image
from autocrop import Cropper
import glob
import os

cropper = Cropper()

count = 0
for file in glob.iglob(r'PATH/TO/DATASET/CAER-S/train/*/*.png',
                       recursive=True):
    count += 1
    print('{0:5d}/{1:5d}'.format(
        count, len(glob.glob(r'PATH/TO/DATASET/CAER-S/train/*/*.png'))))

    new_file = file.split('/')
    new_file[-3] += '_preprocessed'
    if not os.path.exists('/'.join(new_file[:-2])):
        os.makedirs('/'.join(new_file[:-2]))

    try:
        # Get a Numpy array of the cropped image
        cropped_array, masked_array = cropper.crop(file)

        # Save the cropped image with PIL
        cropped_image = Image.fromarray(cropped_array)
        masked_image = Image.fromarray(masked_array)

        if not os.path.exists('/'.join(new_file[:-1])):
            os.makedirs('/'.join(new_file[:-1]))

        cropped_image.save(
Example 18
    def load(self, size, grayscale=False, face_detection=False, limit=None):
        """loads all the images (grayscale) into the project

        Parameters
        ----------
        size : tuple
            target (width, height) of each face-cropped image
        grayscale : bool, optional
            convert all images to grayscale if wanted, by default False
        face_detection : bool, optional
            performs face detection and cropping, by default False
        limit : int, optional
            number of maximum images to be loaded (for large datasets), by default None

        Returns
        -------
        np.array
            array of all the loaded images
        """
        self.gray = grayscale
        lg.info("Loading dataset...")
        if face_detection:
            lg.info("...and performing face detection...")
            # the face-cropped image has the wanted shape
            cropper = Cropper(width=size[0], height=size[1], face_percent=70)
        if limit is not None:
            end = limit
        else:
            end = len(self.image_paths)
        # begin loading
        self.images = []
        for p in range(end):
            path = self.image_paths[p]
            # perform face cropping if wanted
            if face_detection:
                img = cropper.crop(path)
                # if no faces have been detected, skip to the next image
                try:
                    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                except cv2.error:
                    continue
            else:  # no face cropping, just grayscaling
                img = cv2.imread(path)
                img = cv2.resize(
                    img, (config.image_shape[0], config.image_shape[1]))
            if self.gray:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            self.images.append(img)
            # draw a progress line
            progress = ''
            for i in range(30):
                if i <= int(p / end * 30):
                    progress += '='
                else:
                    progress += '.'
            print(f'{str(p+1).zfill(len(str(end)))}/{end} |{progress}|',
                  end='\r',
                  flush=True)
        print('')
        lg.info(
            f'Found {len(self.images)} faces, {end - len(self.images)} images removed.'
        )

        return self.images
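A possible call site for load, assuming project is an instance of the (unnamed) enclosing class with image_paths already populated:

# Hypothetical usage: load at most 1000 face-cropped, grayscale 128x128 images
images = project.load(size=(128, 128), grayscale=True, face_detection=True, limit=1000)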
Example 19
from PIL import Image
from autocrop import Cropper
import shutil
import datetime
import re
import torch.multiprocessing as mp
import traceback
import sys
import logging

logging.basicConfig(format="%(asctime)s - %(levelname)s: %(message)s",
                    level=logging.INFO,
                    datefmt="%I:%M:%S")
TEMP_PATH = "temp"
FILES_PATH = "files"
WIDTH, HEIGHT = 256, 256
cropper = Cropper(width=WIDTH, height=HEIGHT, smoother=True)


class Checker:
    def __init__(self, session):
        logging.info("Initialising Checker-Agent.")
        self.session = session
        self.replies_start = [
            "Hello! I make DeepFake videos \U0001F916.\nUse me with /start"
        ]
        self.replies_photo = [
            "Aight! Send me a photo \U0001F5BC.\nMake sure that the face is clearly visible",
            "Let's do this. Send me a photo with the face clearly visible \U0001F5BC",
            "Here we go. Send me a photo and make sure the face is clearly visible \U0001F5BC"
        ]
        self.replies_video = [
Example 20
    def dropface(self, path):
        cropper = Cropper()
        cropped_array = cropper.crop(path)
        # crop() returns None when no face is detected; only overwrite the file if one was found
        if cropped_array is None:
            return
        cropped_image = Image.fromarray(cropped_array)
        cropped_image.save(path)
Example 21
from PIL import Image
from autocrop import Cropper
cropper = Cropper()

for i in range(150):
    try:
        cropped_array = cropper.crop('F:/Study/IU/' + str(i) + '.jpg')
        # print(cropped_array)
        cropped_image = Image.fromarray(cropped_array)
        cropped_image.save('F:/Study/IU_cropped/' + str(i) + '.jpg')
    except AttributeError:
        print(i)
Example 22
                    break


def set_faces(photo_base, face_base, cropper):
    photo_dir = os.listdir(photo_base)
    for label in photo_dir:
        for i, fn in enumerate(os.listdir(os.path.join(photo_base, label))):
            photos = os.path.join(photo_base, label, fn)
            try:
                cropped_array = cropper.crop(photos)
            except (AttributeError, TypeError):
                continue
            # Skip photos where no face was detected
            if cropped_array is None:
                continue
            faces = Image.fromarray(cropped_array)
            path = os.path.join(face_base, label)
            if not os.path.exists(path):
                os.mkdir(path)
            faces.save(os.path.join(path, f'{i}.jpg'))


if __name__ == "__main__":
    VIDEO_BASE = 'video/'
    PHOTO_BASE = 'photo/'
    FACE_BASE = 'faces/'
    c = Cropper(width=240, height=240, face_percent=75)
    print("Creating photos from videos...")
    set_photo(VIDEO_BASE)
    print("Collecting faces from photos...")
    set_faces(PHOTO_BASE, FACE_BASE, c)
    print("Done!")
Example 23
import numpy as np
from autocrop import Cropper
from PIL import Image

cropper = Cropper()
cropped_array = cropper.crop(
    "/Users/srisagarkalisetty1/Downloads/Evelyn Vega_aligned.jpg")
cropped_image = Image.fromarray(cropped_array)
cropped_image.show()