Example #1
def main(args):
    image_paths = read_list(args.image_list)
    for path in image_paths:
        im = open_image(path)
        # resize for memory
        width, height = im.size
        if height > 800:
            im = im.resize((int(800 * width / height), 800))
            width, height = im.size

        # use 2D-FAN to detect landmarks
        fa = FaceAlignment(LandmarksType._2D,
                           enable_cuda=True,
                           flip_input=False,
                           use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(im))[-1]
            landmarks[:, 1] = height - landmarks[:, 1]
        except:
            continue

        # generate a contour (convex hull) from the landmark points
        hull = ConvexHull(landmarks)
        # draw landmarks
        lm = np.array(im)
        for i in range(landmarks.shape[0]):
            rr, cc = draw.circle(height - landmarks[i, 1].astype('int32'),
                                 landmarks[i, 0].astype('int32'), 5)
            lm[rr, cc, :] = np.array((255, 0, 0))
        # create mask
        mask = np.zeros((height, width))
        rr, cc = draw.polygon(height - landmarks[hull.vertices, 1],
                              landmarks[hull.vertices, 0], mask.shape)
        mask[rr, cc] = 1

        save = True if args.save == 'True' else False
        path = path[:-1] if path[-1] == '/' else path
        image_name = path[path.rindex('/') + 1:-4] + '_contour_nocrf.png'
        show_result(lm,
                    mask,
                    np.tile((mask != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)

        # add CRF
        #prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 + mask[np.newaxis, :, :]*0.1, mask[np.newaxis, :, :]*0.9 + (1-mask)[np.newaxis, :, :]*0.1), axis=0)
        prob = ndimage.gaussian_filter(mask * 1.0, sigma=5)
        prob = np.concatenate(
            ((1 - prob)[np.newaxis, :, :], prob[np.newaxis, :, :]), axis=0)

        map = CRF(prob, np.array(im))
        image_name = path[path.rindex('/') + 1:-4] + '_contour_crf.png'
        show_result(im,
                    map,
                    np.tile((map != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)
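
These examples call a few helpers (read_list, open_image, show_result) whose definitions are not part of the snippets. Below is a minimal sketch of what they are assumed to do, inferred from the call sites; the matplotlib-based display is an assumption, not the original code.

# Hypothetical helpers assumed by the examples on this page; the originals are
# not shown, so the signatures below are inferred from the call sites.
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt


def read_list(list_file):
    # one image path per line
    with open(list_file) as f:
        return [line.strip() for line in f if line.strip()]


def open_image(path):
    # return an RGB PIL image, as expected by np.array(im) and im.size
    return Image.open(path).convert('RGB')


def show_result(image, mask, segmented, save=False, filename=None):
    # show the input, the predicted mask and the masked image side by side
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    for ax, data, title in zip(axes, (image, mask, segmented),
                               ('input', 'mask', 'segmentation')):
        ax.imshow(np.asarray(data))
        ax.set_title(title)
        ax.axis('off')
    if save and filename:
        fig.savefig(filename)
    plt.show()
    plt.close(fig)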
Example #2
import numpy as np
import os
import cv2
from FaceAligner import FaceAlign
from face_alignment.api import FaceAlignment, LandmarksType


source_dataset_path = '../datasets/fer2013/'
cant_crop_path = '../datasets/fer2013_HE_crop/cant_crop/'
more_face_path = '../datasets/fer2013_HE_crop/more_face/'
Coor_2D = FaceAlignment(LandmarksType._2D, flip_input = False)
Rotate = FaceAlign()

def HistEqualize(image_array):
    equ = cv2.equalizeHist(image_array)
    return equ


def cut_range(landmarks):
    a = landmarks.max(axis=0)
    # print(a)
    b = landmarks.min(axis=0)
    # print(b)
    low_max = np.clip(a, 0, 100)
    low_min = np.clip(b, 0, 100)
    # width = low_max[0] - low_min[0]
    # height = low_max[1] - low_min[1]
    # x_min = low_min[0]
    # y_min =
    return low_min[0], low_min[1], low_max[0], low_max[1]
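
A hedged usage sketch of the helpers above on a single grayscale face; 'face.png' is a made-up input path and the guard for a missed detection is an assumption.

# Hypothetical usage of HistEqualize, Coor_2D and cut_range defined above.
import cv2
import numpy as np

gray = cv2.imread('face.png', cv2.IMREAD_GRAYSCALE)
eq = HistEqualize(gray)                    # equalize the grayscale face
rgb = np.stack([eq, eq, eq], axis=-1)      # the landmark detector expects 3 channels
preds = Coor_2D.get_landmarks(rgb)
if preds:                                  # None / empty when no face is detected
    x_min, y_min, x_max, y_max = cut_range(preds[-1])
    crop = rgb[int(y_min):int(y_max), int(x_min):int(x_max)]

Note that cut_range clips coordinates to the range [0, 100], so this is only meaningful for small face crops such as FER2013's 48x48 images.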
Example #3
def main(args):
    caffe.set_mode_gpu()
    caffe.set_device(0)

    # Load both networks
    #net1 = caffe.Net('model/net_landmarks.prototxt', \
    #                 'model/params_landmarks.caffemodel', caffe.TEST)
    net2 = caffe.Net('model/net_segmentation.prototxt', \
                     'model/params_segmentation.caffemodel', caffe.TEST)

    palette = get_palette()

    # Gaussian kernel used to spread the landmark points into heatmaps - gives slightly better results
    f = scipy.io.loadmat('gaus.mat')['f']

    # load image names
    image_paths = read_list(args.image_list)

    # segment and measure performance
    for path in image_paths:
        if path[-3:] == 'jpg' or path[-3:] == 'png':
            imi = open_image(path)
            # resize for memory
            width, height = imi.size
            if height > 800:
                imi = imi.resize((int(800*width/height), 800))
        else:
            continue

        # use 2D-FAN to detect landmarks
        fa = FaceAlignment(LandmarksType._2D, enable_cuda=True,
                           flip_input=False, use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(imi))[-1]
            landmarks = landmarks.astype('uint16')
        except:
            continue

        if args.crop == 'middle':
            imi, landmarks = crop_image_middle(landmarks, imi)
        elif args.crop == 'min':
            imi, landmarks = crop_image_min(landmarks, imi)

        landmarks[:,0], landmarks[:,1] = landmarks[:,1].copy(), landmarks[:,0].copy()

        # prepare the image, limit image size for memory
        width, height = imi.size
        if width > height:
            if width > 450:
                imi = imi.resize((450, int(450 * height/width)))
                landmarks[:,0] = landmarks[:,0] * 450.0 / width
                landmarks[:,1] = landmarks[:,1] * 450.0 / width
            #elif height < 300:
            #    imi = imi.resize((int(300 * width/height), 300))
        else:
            if height > 450:
                imi = imi.resize((int(450 * width/height), 450))
                landmarks[:,0] = landmarks[:,0] * 450.0 / height
                landmarks[:,1] = landmarks[:,1] * 450.0 / height
            #elif width < 300:
            #    imi = imi.resize((300, int(300 * height/width)))
        width, height = imi.size
        im = np.array(imi, dtype=np.float32)
        if len(im.shape) == 2:
            im = np.reshape(im, im.shape+(1,))
            im = np.concatenate((im,im,im), axis=2)
        im = im[:,:,::-1] # RGB to BGR

        # trained with different means (accidentally)
        segIm = im - np.array((87.86,101.92,133.01))
        segIm = segIm.transpose((2,0,1))

        # Do some recovery of the points
        C = np.zeros((landmarks.shape[0], height, width), 'uint8') # cleaned up heatmaps
        C = np.pad(C, ((0,0), (120,120), (120,120)), 'constant')

        for k in range(0,68):
            C[k,landmarks[k,0]+120-100:landmarks[k,0]+120+101,landmarks[k,1]+120-100:landmarks[k,1]+120+101] = f
        C = C[:,120:-120,120:-120] * 0.5

        # Forward through the segmentation network
        D = np.concatenate((segIm, C))
        net2.blobs['data'].reshape(1, *D.shape)
        net2.blobs['data'].data[0,:,:,:] = D
        net2.forward()
        mask = net2.blobs['score'].data[0].argmax(axis=0)
        S = Image.fromarray(mask.astype(np.uint8))
        S.putpalette(palette)

        print('close figure to process next image')

        # transfer score to probability with softmax for later unary term
        score = net2.blobs['score'].data[0]
        prob = np.exp(score) / np.sum(np.exp(score), 0) # (nlabels, height, width)
        #prob_max = np.max(prob, 0) # (0.28, 1)

        # CRF
        map = CRF(prob, im) # final label

        # show result
        save = True if args.save == 'True' else False
        path = path[:-1] if path[-1] == '/' else path
        image_name = path[path.rindex('/')+1:-4] + '_part_nocrf_' + args.crop + '.png'
        show_result(imi, mask, np.tile((mask!=0)[:,:,np.newaxis], (1,1,3)) * imi,
                    save=save, filename='images/'+image_name)
        image_name = path[path.rindex('/')+1:-4] + '_part_crf_' + args.crop + '.png'
        show_result(imi, map, np.tile((map!=0)[:,:,np.newaxis], (1,1,3)) * imi,
                    save=save, filename='images/'+image_name)
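
The CRF helper called here (and in the other examples) is likewise not shown. One possible implementation based on the pydensecrf package follows; the kernel parameters and the number of inference iterations are guesses rather than the original values.

# Hypothetical CRF(prob, img) helper: dense-CRF refinement of a soft mask.
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


def CRF(prob, img, n_iters=5):
    # prob: (n_labels, H, W) class probabilities; img: (H, W, 3) colour image
    n_labels, h, w = prob.shape
    d = dcrf.DenseCRF2D(w, h, n_labels)
    unary = unary_from_softmax(np.clip(prob, 1e-8, 1.0))
    d.setUnaryEnergy(np.ascontiguousarray(unary, dtype=np.float32))
    # smoothness kernel and appearance (colour-dependent) kernel
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(img, dtype=np.uint8),
                           compat=10)
    q = d.inference(n_iters)
    return np.argmax(q, axis=0).reshape((h, w))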
Example #4
def main(args):
    image_paths = read_list(args.image_list)

    # init
    caffe.set_device(0)
    caffe.set_mode_gpu()

    # load net
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)

    for path in image_paths:
        # load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe

        if path[-3:] == 'jpg' or path[-3:] == 'png':
            imi = open_image(path)
            # resize for memory
            width, height = imi.size
            if height > 800:
                imi = imi.resize((int(800 * width / height), 800))
        else:
            continue

        # use 2D-FAN to detect landmarks
        fa = FaceAlignment(LandmarksType._2D,
                           enable_cuda=True,
                           flip_input=False,
                           use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(imi))[-1]
            landmarks = landmarks.astype('uint16')
        except:
            continue

        if args.crop == 'middle':
            imi, landmarks = crop_image_middle(landmarks, imi)
        elif args.crop == 'min':
            imi, landmarks = crop_image_min(landmarks, imi)

        if '300' in args.prototxt:
            imi = imi.resize((300, 300))
        else:
            imi = imi.resize((500, 500))
        im = np.array(imi, dtype=np.float32)
        im = im[:, :, ::-1]
        im -= np.array((104.00698793, 116.66876762, 122.67891434))
        im = im.transpose((2, 0, 1))

        # shape for input (data blob is N x C x H x W), set data
        net.blobs['data'].reshape(1, *im.shape)
        net.blobs['data'].data[...] = im

        # run net and take argmax for prediction
        net.forward()
        mask = net.blobs['score'].data[0].argmax(axis=0)
        im_seg = imi * np.tile((mask != 0)[:, :, np.newaxis], (1, 1, 3))

        save = True if args.save == 'True' else False
        path = path[:-1] if path[-1] == '/' else path
        if '300' in args.prototxt:
            image_name = path[path.rindex('/') +
                              1:-4] + '_yuval_300_nocrf_' + args.crop + '.png'
        else:
            image_name = path[path.rindex('/') +
                              1:-4] + '_yuval_nocrf_' + args.crop + '.png'

        show_result(imi,
                    mask,
                    im_seg,
                    save=save,
                    filename='images/' + image_name)

        # generate prob
        #prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 + mask[np.newaxis,:,:]*0.1, mask[np.newaxis,:,:]*0.9+(1-mask)[np.newaxis,:,:]*0.1), axis=0)
        prob = ndimage.gaussian_filter(mask * 1.0, sigma=5)
        prob = np.concatenate(
            ((1 - prob)[np.newaxis, :, :], prob[np.newaxis, :, :]), axis=0)

        # add CRF
        map = CRF(prob, np.array(imi))
        if '300' in args.prototxt:
            image_name = path[path.rindex('/') +
                              1:-4] + '_yuval_300_crf_' + args.crop + '.png'
        else:
            image_name = path[path.rindex('/') +
                              1:-4] + '_yuval_crf_' + args.crop + '.png'
        show_result(imi,
                    map,
                    np.tile((map != 0)[:, :, np.newaxis], (1, 1, 3)) * imi,
                    save=save,
                    filename='images/' + image_name)
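
crop_image_middle and crop_image_min are also not included in these snippets. A hedged sketch of what crop_image_min is assumed to do; the margin and the clipping behaviour are made up for illustration.

# Hypothetical crop_image_min: crop the PIL image to the landmark bounding box
# plus a margin and shift the landmarks into the cropped coordinate frame.
import numpy as np


def crop_image_min(landmarks, imi, margin=20):
    width, height = imi.size
    x_min, y_min = landmarks.min(axis=0)
    x_max, y_max = landmarks.max(axis=0)
    left = max(int(x_min) - margin, 0)
    top = max(int(y_min) - margin, 0)
    right = min(int(x_max) + margin, width)
    bottom = min(int(y_max) + margin, height)
    cropped = imi.crop((left, top, right, bottom))
    shifted = landmarks.astype(np.int32)
    shifted[:, 0] -= left
    shifted[:, 1] -= top
    return cropped, shifted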
Example #5
import cv2
import sys

from face_detection.api import S3FD
from face_detection.bbox import nms
from face_alignment.api import FaceAlignment
from face_alignment.api import LandmarksType

s3fd = S3FD("models/s3fd_convert.pth")
face_alignment = FaceAlignment(LandmarksType._3D)

score_threshold = 0.5
nms_threshold = 0.3

image = cv2.imread(sys.argv[1])

boxes = s3fd.detect(image, score_threshold)
boxes = boxes[nms(boxes, nms_threshold)]

figure = image.copy()

for box in boxes:
    box = box[0:4]
    landmarks = face_alignment.get_landmarks(image, box)

    for pt in landmarks:
        pt = tuple(pt[0:2].astype(int))
        cv2.circle(figure, pt, 3, (0, 255, 0))

cv2.imshow('', figure)
cv2.waitKey(0)
Example #6
def main(args):
    image_paths = read_list(args.image_list)

    # init
    caffe.set_device(0)
    caffe.set_mode_gpu()

    # load net
    net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)

    for path in image_paths:
        # load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe

        print(path)

        if path[-3:] == 'jpg' or path[-3:] == 'png':
            imi = open_image(path)
            # resize for memory
            width, height = imi.size
            if height > 800:
                imi = imi.resize((int(800*width/height), 800))
                width, height = imi.size
        else:
            continue

        dir = 'images/' + path[path.rindex('/', 0, path.rindex('/'))+1:path.rindex('/')] + '/'
        if not os.path.exists(dir):
            os.makedirs(dir)
        txtfile = dir+path[path.rindex('/')+1:-3]+'txt'

        # use 2D-FAN to detect landmarks
        fa = FaceAlignment(LandmarksType._2D, enable_cuda=True,
                           flip_input=False, use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(imi))[-1]
            save_landmarks(txtfile, landmarks)
            landmarks = landmarks.astype('uint16')
        except:
            # no face detected
            continue

        if args.crop == 'middle':
            imi, landmarks = crop_image_middle(landmarks, imi)
        elif args.crop == 'min':
            imi, landmarks = crop_image_min(landmarks, imi)
        width, height = imi.size

        if '300' in args.prototxt:
            imi = imi.resize((300, 300))
            landmarks[:,0] = landmarks[:,0] * 300 / width
            landmarks[:,1] = landmarks[:,1] * 300 / height
            width = height = 300
        else:
            imi = imi.resize((500, 500))
            landmarks[:,0] = landmarks[:,0] * 500 / width
            landmarks[:,1] = landmarks[:,1] * 500 / height
            width = height = 500
        im = np.array(imi, dtype=np.float32)
        im = im[:,:,::-1]
        im -= np.array((104.00698793,116.66876762,122.67891434))
        im = im.transpose((2,0,1))

        # shape for input (data blob is N x C x H x W), set data
        net.blobs['data'].reshape(1, *im.shape)
        net.blobs['data'].data[...] = im

        # run net and take argmax for prediction
        net.forward()
        mask = net.blobs['score'].data[0].argmax(axis=0)

        save = True if args.save == 'True' else False
        path = path[:-1] if path[-1] == '/' else path
        if '300' in args.prototxt:
            image_name = path[path.rindex('/')+1:-4] + '_yuvalcontour_300_nocrf_' + args.crop + '.png'
        else:
            image_name = path[path.rindex('/')+1:-4] + '_yuvalcontour_nocrf_' + args.crop + '.png'

        # draw landmarks
        lm = np.array(imi)
        for i in range(landmarks.shape[0]):
            rr, cc = draw.circle(landmarks[i,1].astype('int32'), landmarks[i,0].astype('int32'), 1)
            lm[rr, cc, :] = np.array((255, 0, 0))

        # create mask contour
        hull = ConvexHull(landmarks)
        mask_contour = np.zeros((height, width))
        rr, cc = draw.polygon(landmarks[hull.vertices,1], landmarks[hull.vertices,0], mask_contour.shape)
        mask_contour[rr,cc] = 1
        mask = np.clip(mask + mask_contour, 0, 1)

        im_seg = imi * np.tile((mask!=0)[:,:,np.newaxis], (1,1,3))

        show_result(lm, mask, im_seg, save=save, filename=dir+image_name)

        # generate prob
        #prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 + mask[np.newaxis,:,:]*0.1, mask[np.newaxis,:,:]*0.9+(1-mask)[np.newaxis,:,:]*0.1), axis=0)
        prob = ndimage.gaussian_filter(mask*1.0, sigma=5)
        prob = np.concatenate(((1-prob)[np.newaxis,:,:], prob[np.newaxis,:,:]), axis=0)

        # add CRF
        map = CRF(prob, np.array(imi))
        if '300' in args.prototxt:
            image_name = path[path.rindex('/')+1:-4] + '_yuvalcontour_300_crf_' + args.crop + '.png'
        else:
            image_name = path[path.rindex('/')+1:-4] + '_yuvalcontour_crf_' + args.crop + '.png'
        show_result(imi, map, np.tile((map!=0)[:,:,np.newaxis], (1,1,3)) * imi, save=save, filename=dir+image_name)
Example #7
    def remove_neck_landmarks(self, images_path, segs_path, root):
        """ Removes neck segmentation according to face landmarks model of the FaceEngine module

        Args:
            images_path: list of paths to images
            segs_path: list of paths to segmentation masks

        Returns:
            Segmentation masks without the neck part, saved as root/Masks/name.png
        """
        # I. Set 2D landmarks detector and gender detector for lfw dataset
        lm_detector = FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
        d = gdetect.Detector()

        for i in tqdm(range(len(images_path))):
            name = segs_path[i].split('/')[-1]

            # II. Open IMAGE and segmentation MASK
            source_img = cv.imread(images_path[i])
            source_img = cv.cvtColor(source_img, cv.COLOR_BGR2RGB)
            source_seg = Image.open(segs_path[i])

            # III. Find LANDMARKS
            landmarks = lm_detector.get_landmarks_from_image(source_img)[0]
            # in order to remove ears as well
            left_b = [landmarks[0][0], landmarks[0][1] - (landmarks[2][1] - landmarks[0][1])]
            right_b = [landmarks[16][0], landmarks[16][1]-(landmarks[14][1] - landmarks[16][1])]
            lm = landmarks[:17]

            # IV. Get coordinates of points to create separation line (border)
            h, w = source_seg.size
            left_edge = [0, left_b[1]]
            right_edge = [w-1, right_b[1]]
            points_neck = left_edge + left_b + list(lm.ravel()) + right_b + right_edge
            points_beard = left_edge + left_b + right_b + right_edge

            # V. Get starting points for filler algorithm
            left = [left_edge[0], left_edge[1]+1]
            right = [right_edge[0], right_edge[1]+1]

            # VI. Draw border line
            source_seg = np.array(source_seg).astype(np.uint8)
            if len(np.unique(source_seg)) != 3:
                source_seg[source_seg==1] = 0
                source_seg[source_seg==2] = 1
            mask_neck = source_seg.copy()
            # mask_neck[mask_neck == 2] = 0
            mask_neck = Image.fromarray(mask_neck)
            draw = ImageDraw.ImageDraw(mask_neck)
            draw.line(points_neck, 255, 0)

            # VII. Create a mask without neck
            mask_neck = np.array(mask_neck)
            mask_no_neck = fill_underline(mask_neck, h, w, left, right)
            face_mask = source_seg.copy()

            # VIII. Remove redundant features
            first_name = name.split('_')[0]
            sex = d.get_gender(first_name)
            if len(np.unique(source_seg)) == 3:
                if sex == 'male':
                    # Cut beards and mustache
                    mask_beard = source_seg.copy()
                    mask_beard = Image.fromarray(mask_beard)

                    # draw separation line
                    draw = ImageDraw.ImageDraw(mask_beard)
                    draw.line(points_beard, 255, 0)

                    # fill all the area with 1 label
                    mask_beard = np.array(mask_beard)
                    mask_no_beard = fill_underline(mask_beard, h, w, left, right, fill=1)

                    # remove mustache
                    face_mask[mask_no_beard == 1] = 1

                    # remove beard and neck
                    face_mask[mask_no_neck == 0] = 0

                else:
                    # remove neck only for women
                    face_mask[mask_no_neck == 0] = 0
                    face_mask[source_seg == 2] = 2

            else:
                face_mask[mask_no_neck == 0] = 0

            # IX. Add palette
            mask_f = Image.fromarray(face_mask)
            if len(np.unique(face_mask)) == 3:
                mask_f = mask_f.convert('P', palette=Image.ADAPTIVE, colors=3)
                reverse_colors = np.array(mask_f)
                reverse_colors[reverse_colors == 0] = 3
                reverse_colors[reverse_colors == 2] = 0
                reverse_colors[reverse_colors == 3] = 2
            else:
                mask_f = mask_f.convert('P', palette=Image.ADAPTIVE, colors=2)
                reverse_colors = np.array(mask_f)
                reverse_colors[reverse_colors == 1] = 4
                reverse_colors[reverse_colors == 0] = 1
                reverse_colors[reverse_colors == 4] = 0
            mask_f = Image.fromarray(reverse_colors, mode='P')
            mask_f.putpalette([
                0, 0, 0,  # index 0 is black (background)
                0, 255, 0,  # index 1 is green (face)
                255, 0, 0,  # index 2 is red (hair)
            ])

            # X. Save results
            path = "{}/Masks/{}".format(root, name)
            mask_f.save(path)
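
fill_underline, used above to cut away everything below the drawn border line, is not shown either. A minimal sketch under the assumption that it flood-fills the region below the 255-valued line starting from the two seed points and overwrites it with fill:

# Hypothetical fill_underline: flood-fill the region below the 255-valued
# border line. Seeds are (x, y) points as passed by the caller; h and w are
# kept only for signature compatibility, the shape is taken from the array.
import numpy as np


def fill_underline(mask, h, w, left, right, fill=0):
    out = mask.copy()
    barrier = (out == 255)                      # the line drawn with ImageDraw
    visited = np.zeros(out.shape, dtype=bool)
    stack = [(int(left[1]), int(left[0])), (int(right[1]), int(right[0]))]  # (row, col)
    while stack:
        r, c = stack.pop()
        if r < 0 or r >= out.shape[0] or c < 0 or c >= out.shape[1]:
            continue
        if visited[r, c] or barrier[r, c]:
            continue
        visited[r, c] = True
        out[r, c] = fill
        stack.extend([(r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)])
    return out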
Example #8
import csv
import numpy as np
import os
import cv2
from skimage import io
from face_alignment.api import FaceAlignment, LandmarksType

fa_2D = FaceAlignment(LandmarksType._2D, flip_input=False)
fa_3D = FaceAlignment(LandmarksType._3D, flip_input=False)


class DataTransform(object):
    """"
    class for get the landmarks of dataset and tranform
    it to csv
    """
    def __init__(self, dataset_name='fer2013', dataset_path=None, method='3D'):

        self.dataset_name = dataset_name
        self.dataset_path = dataset_path
        self.method = method
        # if self.method == '2D':
        #     self.landmark_detect = FaceAlignment(LandmarksType._2D, flip_input=False)
        # elif self.method == '3D':
        #     self.landmark_detect = FaceAlignment(LandmarksType._3D, flip_input=False)
        if self.dataset_path is not None:
            self.dataset_path = dataset_path
        elif self.dataset_name == 'fer2013':
            self.dataset_path = '../datasets/fer2013/'
        elif self.dataset_name == 'KDEF':
            self.dataset_path = '../datasets/KDEF/'