Example #1
File: demo.py  Project: li-plus/undergrad
from pathlib import Path

import numpy as np
from PIL import Image

import seam_carving

root = Path('.')  # assumed: base directory containing the fig/ images used below


def main():
    # scaling up & down
    src = np.array(Image.open(root / 'fig/castle.jpg'))
    h, w, c = src.shape
    padding = np.zeros((h, 4, c), dtype=np.uint8)
    scale_down = seam_carving.resize(src, (w - 200, h))
    scale_up = seam_carving.resize(src, (w + 200, h))
    merged = np.hstack((src, padding, scale_down, padding, scale_up))
    Image.fromarray(merged).show()

    # forward energy vs backward energy
    src = np.array(Image.open(root / 'fig/bench.jpg'))
    h, w, c = src.shape
    padding = np.zeros((h, 4, c), dtype=np.uint8)
    backward = seam_carving.resize(src, (w - 200, h))
    forward = seam_carving.resize(src, (w - 200, h), energy_mode='forward')
    merged = np.hstack((src, padding, backward, padding, forward))
    Image.fromarray(merged).show()

    # object removal
    src = np.array(Image.open(root / 'fig/beach.jpg'))
    h, w, c = src.shape
    mask = np.array(Image.open(root / 'fig/beach_girl.png').convert('L'))
    dst = seam_carving.remove_object(src, mask)
    padding = np.zeros((h, 4, c), dtype=np.uint8)
    merged = np.hstack((src, padding, dst))
    Image.fromarray(merged).show()
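All of the resize calls above work the same way underneath: an energy map is computed, the lowest-energy vertical seam is found by dynamic programming, and that seam is removed (or duplicated, when enlarging). The sketch below is a minimal illustration of the backward (gradient-magnitude) variant; it is not the seam_carving package's actual implementation, and the function names are made up for this example.

import numpy as np

def find_vertical_seam(energy):
    # Dynamic programming: build the cumulative minimum-cost map row by row,
    # then backtrack one 8-connected path from bottom to top.
    h, w = energy.shape
    cost = energy.astype(float)
    for row in range(1, h):
        left = np.roll(cost[row - 1], 1)
        right = np.roll(cost[row - 1], -1)
        left[0] = right[-1] = np.inf
        cost[row] += np.minimum(np.minimum(left, cost[row - 1]), right)
    seam = np.empty(h, dtype=np.int64)
    seam[-1] = np.argmin(cost[-1])
    for row in range(h - 2, -1, -1):
        prev = seam[row + 1]
        lo, hi = max(prev - 1, 0), min(prev + 2, w)
        seam[row] = lo + np.argmin(cost[row, lo:hi])
    return seam

def remove_one_vertical_seam(img):
    # Gradient-magnitude ("backward") energy, then drop the cheapest seam.
    gray = img.mean(axis=2)
    gy, gx = np.gradient(gray)
    seam = find_vertical_seam(np.abs(gx) + np.abs(gy))
    h, w = gray.shape
    keep = np.ones((h, w), dtype=bool)
    keep[np.arange(h), seam] = False
    return img[keep].reshape(h, w - 1, img.shape[2])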
Example #2
File: cli.py  Project: li-plus/seam-carving
import argparse
import time
from pathlib import Path

import numpy as np
from PIL import Image

import seam_carving


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('src', type=str)
    parser.add_argument('-o', dest='dst', type=str, default='a.jpg')
    parser.add_argument('--keep', type=str, default=None)
    parser.add_argument('--drop', type=str, default=None)
    parser.add_argument('--dw', type=int, default=0)
    parser.add_argument('--dh', type=int, default=0)
    parser.add_argument('--energy', type=str, default='backward',
                        choices=['backward', 'forward'])
    parser.add_argument('--order', type=str, default='width-first',
                        choices=['width-first', 'height-first', 'optimal'])
    args = parser.parse_args()

    try:
        print('Loading source image from {}'.format(args.src))
        src = np.array(Image.open(args.src))

        drop_mask = None
        if args.drop is not None:
            print('Loading drop_mask from {}'.format(args.drop))
            drop_mask = np.array(Image.open(args.drop).convert('L'))

        keep_mask = None
        if args.keep is not None:
            print('Loading keep_mask from {}'.format(args.keep))
            keep_mask = np.array(Image.open(args.keep).convert('L'))

        print('Performing seam carving...')
        start = time.time()
        if drop_mask is not None:
            dst = seam_carving.remove_object(src, drop_mask, keep_mask)
        else:
            src_h, src_w, _ = src.shape
            dst = seam_carving.resize(src, (src_w + args.dw, src_h + args.dh),
                                      args.energy, args.order, keep_mask)
        print('Done in {:.4f} second(s)'.format(time.time() - start))

        print('Saving output image to {}'.format(args.dst))
        Path(args.dst).parent.mkdir(parents=True, exist_ok=True)
        Image.fromarray(dst).save(args.dst)
    except Exception as e:
        print(e)
        exit(1)
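For reference, a hypothetical invocation of this CLI (the file names are placeholders) that narrows an image by 200 columns using forward energy would be:

python cli.py castle.jpg -o out/castle_narrow.jpg --dw -200 --energy forward

A negative --dw shrinks the width and a positive one grows it, since the target size is computed as (src_w + args.dw, src_h + args.dh).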
Example #3
File: main.py  Project: li-plus/undergrad
import argparse

import cv2

import seam_carving


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--src', type=str, default=None)
    parser.add_argument('--mask', type=str, default=None)
    parser.add_argument('--dst', type=str, default=None)
    parser.add_argument('--delta-col', type=int, default=0)
    args = parser.parse_args()

    src = cv2.imread(args.src)

    if args.mask:
        mask = cv2.imread(args.mask, cv2.IMREAD_GRAYSCALE)
        dst = seam_carving.remove_object(src, mask)
    else:
        if args.delta_col > 0:
            dst = seam_carving.insert_seams(src, args.delta_col)
        else:
            dst = seam_carving.remove_seams(src, abs(args.delta_col))

    cv2.imwrite(args.dst, dst)
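The same pattern applies here; a hypothetical invocation (placeholder paths) that drops 150 columns would be:

python main.py --src in.jpg --dst out.jpg --delta-col -150

Passing --mask with a grayscale mask image switches to object removal via remove_object instead.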
Example #4
import matplotlib.pyplot as plt
from skimage import io, util

image = io.imread('imgs/wyeth.jpg')  # assumed path: the source image that pairs with the mask below

mask = io.imread('imgs/wyeth_mask.jpg', as_gray=True)  # scikit-image spells this parameter 'as_gray'
mask = util.img_as_bool(mask)

plt.subplot(1, 2, 1)
plt.title('Original Image')
plt.imshow(image)

plt.subplot(1, 2, 2)
plt.title('Mask of the object to remove')
plt.imshow(mask)

plt.show()
from seam_carving import remove_object

# Use your function to remove the object
out = remove_object(image, mask)

plt.subplot(2, 2, 1)
plt.title('Original Image')
plt.imshow(image)

plt.subplot(2, 2, 2)
plt.title('Mask of the object to remove')
plt.imshow(mask)

plt.subplot(2, 2, 3)
plt.title('Image with object removed')
plt.imshow(out)

plt.show()
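Conceptually, remove_object keeps carving seams that are forced through the masked region until no masked pixels remain, shrinking the mask together with the image. The loop below illustrates that idea; it reuses the find_vertical_seam helper from the sketch under Example #1 and is not the actual implementation behind the import above.

import numpy as np

def remove_masked_object(img, mask):
    # Illustrative only. Assumes find_vertical_seam from the sketch under
    # Example #1 is in scope.
    img = np.asarray(img).copy()
    mask = np.asarray(mask).astype(bool)
    while mask.any():
        gray = img.mean(axis=2)
        gy, gx = np.gradient(gray)
        energy = np.abs(gx) + np.abs(gy)
        energy[mask] = -1e9                    # pull the seam through the object
        seam = find_vertical_seam(energy)

        h, w = mask.shape
        keep = np.ones((h, w), dtype=bool)
        keep[np.arange(h), seam] = False
        img = img[keep].reshape(h, w - 1, img.shape[2])
        mask = mask[keep].reshape(h, w - 1)    # the mask shrinks with the image
    return img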
Example #5
    def detect_facial_feature(self, img, visible=False):
        """Detect the face, then the eyes, nose and mouth inside it with Haar
        cascades, draw the detections on an overlay, and return the cropped
        regions in a dict keyed 'face', 'eyes', 'nose', 'mouth', 'left_cheek',
        'right_cheek' and 'skin'."""
        face_cascade = cv2.CascadeClassifier('cascades/frontalFace.xml')
        eye_cascade = cv2.CascadeClassifier('cascades/Eyes_cascade.xml')
        nose_cascade = cv2.CascadeClassifier('cascades/Nose_cascade.xml')
        mouth_cascade = cv2.CascadeClassifier('cascades/Mouth_cascade.xml')

        roi_data = {}

        overlay = img.copy()
        gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)

        faces = self.__get_best_feature_roi(gray,
                                            face_cascade,
                                            k_mean=5,
                                            scale=1.3)
        for (x, y, w, h) in faces:
            face = img[y:y + h, x:x + w]
            roi_data['face'] = face
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = overlay[y:y + h, x:x + w]
            face_img = roi_color.copy()

            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 2)
            face_center = self.__get_center(left_top_pixel=(x, y),
                                            right_bottom_pixel=(x + w, y + h))
            cv2.circle(overlay, face_center, 2, (255, 0, 0), -1)
            print('Face: ', face_center)

            eyes = self.__get_best_feature_roi(roi_gray,
                                               eye_cascade,
                                               k_mean=3,
                                               scale=1.3)
            for (ex, ey, ew, eh) in eyes:
                roi_data['eyes'] = face[ey:ey + eh, ex:ex + ew]
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                              (0, 255, 0), 2)
                #cv2.circle(roi_color, (int((ey+eh)/2), int((ex+ew)/2)), 2, (0, 255, 0), -1)
                eyes_center = self.__get_center(left_top_pixel=(ex, ey),
                                                right_bottom_pixel=(ex + ew,
                                                                    ey + eh))
                cv2.circle(roi_color, eyes_center, 2, (0, 255, 0), -1)
                print('Eyes: ', eyes_center)

            nose = self.__get_best_feature_roi(roi_gray,
                                               nose_cascade,
                                               k_mean=3,
                                               scale=1.3)
            for (nx, ny, nw, nh) in nose:
                roi_data['nose'] = face[ny:ny + nh, nx:nx + nw]
                cv2.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh),
                              (0, 0, 255), 2)
                nose_center = self.__get_center(left_top_pixel=(nx, ny),
                                                right_bottom_pixel=(nx + nw,
                                                                    ny + nh))
                cv2.circle(roi_color, nose_center, 2, (0, 0, 255), -1)
                print('Nose: ', nose_center)

            mouth = self.__get_best_feature_roi(roi_gray,
                                                mouth_cascade,
                                                k_mean=50,
                                                k_mean_step=10,
                                                scale=1.3)
            for (mx, my, mw, mh) in mouth:
                roi_data['mouth'] = face[my:my + mh, mx:mx + mw]
                cv2.rectangle(roi_color, (mx, my), (mx + mw, my + mh),
                              (255, 255, 0), 2)
                mouth_center = self.__get_center(left_top_pixel=(mx, my),
                                                 right_bottom_pixel=(mx + mw,
                                                                     my + mh))
                cv2.circle(roi_color, mouth_center, 2, (255, 255, 0), -1)
                print('Mouth: ', mouth_center)

            if len(eyes) == 1 and len(nose) == 1:
                ex, ey, ew, eh = eyes[0]
                nx, ny, nw, nh = nose[0]
                roi_data['right_cheek'] = face[ey + eh:int((ny + ny + nh) / 2),
                                               ex:nx]
                roi_data['left_cheek'] = face[ey + eh:int((ny + ny + nh) / 2),
                                              nx + nw:ex + ew]
                cv2.rectangle(roi_color, (ex, ey + eh),
                              (nx, int((ny + ny + nh) / 2)), (255, 0, 255), 2)
                cv2.rectangle(roi_color, (nx + nw, ey + eh),
                              (ex + ew, int(
                                  (ny + ny + nh) / 2)), (255, 0, 255), 2)

            removed_features_on_face = face
            # if len(eyes) == 1:
            #     removed_features_on_face = self.feature_smoothing(removed_features_on_face, eyes[0])
            # if len(nose) == 1:
            #     removed_features_on_face = self.feature_smoothing(removed_features_on_face, nose[0])
            # if len(mouth) == 1:
            #     removed_features_on_face = self.feature_smoothing(removed_features_on_face, mouth[0])
            # roi_data['skin'] = removed_features_on_face

            if len(eyes) == 1:
                pass
                #removed_features_on_face = seam_carving.remove_object(removed_features_on_face, eyes[0])
            if len(nose) == 1:
                removed_features_on_face = seam_carving.remove_object(
                    removed_features_on_face, nose[0])
            if len(mouth) == 1:
                removed_features_on_face = seam_carving.remove_object(
                    removed_features_on_face, mouth[0])
            roi_data['skin'] = removed_features_on_face

            if visible:
                h, w = overlay.shape[:2]
                if h > 1200 or w > 1200:
                    # Downscale very large images before displaying them.
                    overlay = cv2.resize(overlay, dsize=None, fx=0.4, fy=0.4,
                                         interpolation=cv2.INTER_AREA)
                cv2.imshow('Facial Features', overlay)
                #cv2.imshow('Mask', self.__filter_skin(face_img))
                cv2.waitKey()
                cv2.destroyAllWindows()

        return roi_data
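The private helper __get_center used throughout this method is not shown in the excerpt, but its call sites make the intent clear: it returns the midpoint of a bounding box as integer pixel coordinates. A minimal stand-in (hypothetical, not the original helper) would be:

def get_center(left_top_pixel, right_bottom_pixel):
    # Integer midpoint of a bounding box given its opposite corners.
    (x1, y1), (x2, y2) = left_top_pixel, right_bottom_pixel
    return (x1 + x2) // 2, (y1 + y2) // 2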
Example #6
File: cli.py  Project: li-plus/undergrad
import argparse
import base64
import json
import time
from pathlib import Path

import numpy as np
import requests
from PIL import Image

import seam_carving


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('src', type=str)
    parser.add_argument('-o', dest='dst', type=str, required=True)
    parser.add_argument('--keep', type=str, default=None)
    parser.add_argument('--drop', type=str, default=None)
    parser.add_argument('--dw', type=int, default=0)
    parser.add_argument('--dh', type=int, default=0)
    parser.add_argument('--energy',
                        type=str,
                        default='backward',
                        choices=['backward', 'forward'])
    parser.add_argument('--order',
                        type=str,
                        default='width-first',
                        choices=['width-first', 'height-first', 'optimal'])
    parser.add_argument('--face', action='store_true')
    args = parser.parse_args()

    try:
        print('Loading source image from {}'.format(args.src))
        src = np.array(Image.open(args.src))

        drop_mask = None
        if args.drop is not None:
            print('Loading drop_mask from {}'.format(args.drop))
            drop_mask = np.array(Image.open(args.drop).convert('L'))

        keep_mask = None
        if args.keep is not None:
            print('Loading keep_mask from {}'.format(args.keep))
            keep_mask = np.array(Image.open(args.keep).convert('L'))

        if args.face:
            # Face detection using face++ API
            with open(Path(__file__).parent / 'api_config.json') as f:
                api_config = json.load(f)
            with open(args.src, 'rb') as f:
                img_data = f.read()
            image_base64 = base64.b64encode(img_data)
            url = 'https://api-us.faceplusplus.com/facepp/v3/detect'
            data = {
                'api_key': api_config['api_key'],
                'api_secret': api_config['api_secret'],
                'image_base64': image_base64
            }
            response = requests.post(url, data)
            data = response.json()

            src_h, src_w, _ = src.shape
            if keep_mask is None:
                keep_mask = np.zeros((src_h, src_w), dtype=bool)

            for face in data['faces']:
                rect = face['face_rectangle']
                x1 = rect['left']
                y1 = rect['top']
                w = rect['width']
                h = rect['height']
                keep_mask[y1:y1 + h, x1:x1 + w] = True

        print('Performing seam carving...')
        start = time.time()
        if drop_mask is not None:
            dst = seam_carving.remove_object(src, drop_mask, keep_mask)
        else:
            src_h, src_w, _ = src.shape
            dst = seam_carving.resize(src, (src_w + args.dw, src_h + args.dh),
                                      args.energy, args.order, keep_mask)
        print('Done in {:.4f} second(s)'.format(time.time() - start))

        print('Saving output image to {}'.format(args.dst))
        Path(args.dst).parent.mkdir(parents=True, exist_ok=True)
        Image.fromarray(dst).save(args.dst)
    except Exception as e:
        print(e)
        exit(1)
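As with Example #2, a hypothetical invocation (placeholder paths; api_config.json must contain valid Face++ credentials) that removes a masked object while protecting any detected faces would be:

python cli.py group_photo.jpg -o out/result.jpg --drop person_mask.png --face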