def test_simple(self, random_lf, scale):
    a = Smile(scale)
    new_lf, df = a.perform(random_lf)

    if scale == 0:
        assert np.allclose(df.delta_x, np.zeros_like(df.delta_x))
        assert np.allclose(df.delta_y, np.zeros_like(df.delta_y))
    else:
        assert not np.allclose(df.delta_x, np.zeros_like(df.delta_x))
        assert not np.allclose(df.delta_y, np.zeros_like(df.delta_y))
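# The tests above rely on ``random_lf`` and ``scale`` fixtures that are not shown here.
# A minimal conftest.py sketch, assuming LandmarkFace accepts a (68, 2) array of points
# plus an image, as the TestMultiple class below also does (the concrete values are
# illustrative, not the project's actual fixtures):
import numpy as np
import pytest

from pychubby.detect import LandmarkFace


@pytest.fixture
def random_lf():
    """Landmark face with 68 random points placed inside a small blank image."""
    img = np.zeros((100, 100), dtype=np.uint8)
    points = np.random.uniform(20, 80, size=(68, 2))
    return LandmarkFace(points, img)


@pytest.fixture(params=[0, 0.5], ids=['zero', 'nonzero'])
def scale(request):
    """Smile scale: 0 should yield a zero displacement field, 0.5 a nonzero one."""
    return request.param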
import cv2
import matplotlib.pyplot as plt

from pychubby.actions import Smile
from pychubby.detect import LandmarkFace
from pychubby.visualization import create_animation


def smile(img_path):
    # OpenCV loads images as BGR; pychubby expects RGB.
    img = cv2.imread(img_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    lf = LandmarkFace.estimate(img_rgb)

    a = Smile(scale=0.2)
    new_lf, df = a.perform(lf)

    # create_animation returns a matplotlib animation; plt.imsave cannot
    # serialise it, so use the animation's own save method instead.
    ani = create_animation(df, img_rgb)
    ani.save('output_image.gif', writer='pillow')
    plt.show()
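# If only the final warped frame is needed (no GIF), a shorter variant is possible.
# This sketch reuses the imports above and assumes, as the photo() handler below also
# does, that new_lf.img holds the warped image; 'face.jpg' is a placeholder path:
def smile_still(img_path, out_path='smiled.png'):
    img_rgb = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    lf = LandmarkFace.estimate(img_rgb)
    new_lf, _ = Smile(scale=0.2).perform(lf)
    plt.imsave(out_path, new_lf.img)


# smile_still('face.jpg')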
def test_overall(self, random_lf):
    steps = [Smile(), Chubbify()]
    a = Pipeline(steps)
    new_lf, df = a.perform(random_lf)

    assert df.is_valid
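# Outside the test suite, Pipeline is typically combined with Multiple so the same
# chain of actions runs on every detected face. A sketch following the documented
# pychubby workflow ('portrait.jpg' is a placeholder path):
import matplotlib.pyplot as plt

from pychubby.actions import Chubbify, Multiple, Pipeline, Smile
from pychubby.detect import LandmarkFace

img = plt.imread('portrait.jpg')
lf = LandmarkFace.estimate(img)

a_per_face = Pipeline([Chubbify(), Smile()])
a_all = Multiple(a_per_face)

new_lf, df = a_all.perform(lf)
new_lf.plot(figsize=(10, 10), show_numbers=False)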
class TestMultiple:
    """Collection of tests focused on the ``Multiple`` action."""

    @pytest.mark.parametrize('per_face_action', [Smile(), [Smile(), Smile()]], ids=['single', 'many'])
    def test_overall(self, random_lf, per_face_action):
        lf_1 = random_lf
        lf_2 = LandmarkFace(random_lf.points + np.random.random((68, 2)), random_lf.img)
        lfs = LandmarkFaces(lf_1, lf_2)

        a = Multiple(per_face_action)
        new_lfs, df = a.perform(lfs)

        assert isinstance(new_lfs, LandmarkFaces)
        assert isinstance(df, DisplacementField)
        assert len(lfs) == len(new_lfs)

    def test_wrong_n_of_action(self, random_lf):
        lfs = LandmarkFaces(random_lf, random_lf, random_lf)
        a = Multiple([Smile(), Smile()])

        with pytest.raises(ValueError):
            a.perform(lfs)

    def test_wrong_constructor(self):
        with pytest.raises(TypeError):
            Multiple([Smile(), 'WRONG'])

        with pytest.raises(TypeError):
            Multiple('WRONG')

    def test_lf_to_lfs_casting(self, random_lf):
        a = Multiple(Smile())
        new_lfs, df = a.perform(random_lf)

        assert isinstance(new_lfs, LandmarkFaces)
        assert isinstance(df, DisplacementField)
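# As these tests show, Multiple accepts either a single action (applied to every face)
# or a list with exactly one action per face in the LandmarkFaces. A minimal sketch
# applying a different Smile strength to each of two faces, assuming lf_1 and lf_2 are
# LandmarkFace instances estimated from the same image:
from pychubby.actions import Multiple, Smile
from pychubby.detect import LandmarkFaces

lfs = LandmarkFaces(lf_1, lf_2)
a = Multiple([Smile(scale=0.1), Smile(scale=0.3)])
new_lfs, df = a.perform(lfs)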
import base64

import cv2
import matplotlib.pyplot as plt
import numpy as np
from flask import request  # assumes a Flask app; the original snippet does not show the framework

# only Smile is used below
from pychubby.actions import Smile, OpenEyes, Multiple, RaiseEyebrow, StretchNostrils, AbsoluteMove
from pychubby.detect import LandmarkFace


def photo():
    # The client sends the photo base64-encoded in the 'photob62' query parameter.
    img_b64 = request.args.get('photob62')
    imgdata = base64.b64decode(img_b64)

    # cv2.imread only accepts file paths; decode the raw bytes in memory instead.
    img = cv2.imdecode(np.frombuffer(imgdata, dtype=np.uint8), cv2.IMREAD_COLOR)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    lf = LandmarkFace.estimate(img_rgb)

    smile = Smile(scale=0.2)
    new_lf, df = smile.perform(lf)
    # new_lf.plot(show_landmarks=False)

    plt.imsave('output_image.png', new_lf.img)
    encoded = base64.b64encode(open('output_image.png', 'rb').read())
    return encoded
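# The temporary PNG on disk can be avoided by encoding the warped image in memory.
# A sketch reusing the imports above, assuming new_lf.img is a uint8 RGB array
# (cv2.imencode expects BGR ordering):
def encode_png_base64(img_rgb):
    img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
    ok, buf = cv2.imencode('.png', img_bgr)
    if not ok:
        raise ValueError('PNG encoding failed')
    return base64.b64encode(buf.tobytes())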
# (fragment of the per-frame loop: `image`, `blank_image`, `frame`, `shape`, the face
#  index `i`, the bounding-box corner `x`, `y`, and the virtual camera `cam` are set
#  up earlier in the loop)
cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
for (x, y) in shape:
    cv2.circle(image, (x, y), 2, (0, 0, 255), -1)
    cv2.circle(blank_image, (x, y), 2, (0, 0, 255), -1)

cv2.imshow('Video', blank_image)
cv2.imshow('video1', image)

# pychubby expects RGB, so convert the BGR frame before estimating landmarks
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
try:
    lf = LandmarkFace.estimate(img)
    a_per_face = Pipeline([Smile()])
    a_all = Multiple(a_per_face)
    new_lf, _ = a_all.perform(lf)
    new_img = new_lf.img  # the warped frame; new_lf itself is not an image array
    # new_lf.plot(figsize=(5, 5), show_numbers=False)
except Exception:
    # no face found in this frame (or warping failed): keep streaming as-is
    pass

cam.send(blank_image)
cam.sleep_until_next_frame()

if cv2.waitKey(1) & 0xFF == ord('q'):
    break
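# One possible scaffolding for the fragment above, assuming dlib/imutils for the
# landmark detection and drawing, and pyvirtualcam for the virtual webcam output.
# Variable names mirror the fragment; the shape predictor path is the standard dlib
# 68-landmark model and is a placeholder here:
import cv2
import dlib
import numpy as np
import pyvirtualcam
from imutils import face_utils

from pychubby.actions import Multiple, Pipeline, Smile
from pychubby.detect import LandmarkFace

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

cap = cv2.VideoCapture(0)

with pyvirtualcam.Camera(width=640, height=480, fps=20) as cam:
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        image = frame.copy()
        blank_image = np.zeros_like(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces and compute 68 landmarks for each one
        for i, rect in enumerate(detector(gray, 0)):
            shape = face_utils.shape_to_np(predictor(gray, rect))
            (x, y, w, h) = face_utils.rect_to_bb(rect)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # ... per-face drawing from the fragment above goes here ...

        # ... pychubby warp, imshow, and cam.send from the fragment go here ...

cap.release()
cv2.destroyAllWindows()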