def test_test2(self):
    # Encoding the reference JSON document must reproduce the reference
    # SMILE file byte-for-byte.
    s = os.path.join(self.smile_dir, 'test2.smile')
    j = os.path.join(self.json_dir, 'test2.jsn')
    with open(j, 'rb') as f:
        a = pysmile.encode(json.load(f))
    with open(s, 'rb') as f:
        b = f.read()
    self.assertEqual(a, b, '{}\nExpected:\n{!r}\nGot:\n{!r}'.format(s, b, a))
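
# A decode-direction companion test is the natural complement of the check
# above: parse the SMILE fixture and compare it to the JSON document. This is
# a minimal sketch, assuming pysmile.decode() accepts the raw SMILE bytes;
# test2.smile and test2.jsn are the same fixtures used in test_test2.
def test_test2_decode(self):
    s = os.path.join(self.smile_dir, 'test2.smile')
    j = os.path.join(self.json_dir, 'test2.jsn')
    with open(s, 'rb') as f:
        a = pysmile.decode(f.read())
    with open(j, 'rb') as f:
        b = json.load(f)
    self.assertEqual(a, b)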
def callback_on_message(ch, method, properties, body):
    """Decode an incoming video frame, detect faces, and republish each
    face crop (base64-encoded) as a SMILE-serialized message."""
    time_start = datetime.datetime.now()

    # Byte array to bitmap (BGR image, as OpenCV produces)
    frame = cv2.imdecode(np.frombuffer(body, dtype=np.uint8), cv2.IMREAD_COLOR)

    # Resize frame of video to 1/4 size for faster face recognition processing
    # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    small_frame = frame  # no resizing
    # cv2.imwrite('test.bmp', small_frame)

    # Convert the image from BGR color (which OpenCV uses) to RGB color
    # (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Find all the faces in the current frame of video
    face_locations = face_recognition.face_locations(rgb_small_frame)

    # Publish one message per detected face
    for (top, right, bottom, left) in face_locations:
        # Scale face locations back up if the frame was resized to 1/4 size
        # top *= 4
        # right *= 4
        # bottom *= 4
        # left *= 4

        # Encode the face crop as BMP for transport
        retval, buffer = cv2.imencode('.bmp', frame[top:bottom, left:right].copy())
        if not retval:
            continue  # skip crops that failed to encode

        # Build a message to send to RMQ; b64encode returns bytes, so decode
        # to str so the SMILE serializer sees a JSON-compatible value
        message = {
            'top': top,
            'right': right,
            'bottom': bottom,
            'left': left,
            'process_time': (datetime.datetime.now() - time_start).total_seconds(),
            'face': base64.b64encode(buffer).decode('ascii'),  # extract the face
        }
        print('Processed %s' % message['process_time'])

        # Push the face to RMQ - serialize message with SMILE binary format
        rmq_client.publish_exchange(ch, rmq_completed_exchange, pysmile.encode(message))
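
# For context, this is roughly how the callback above would be wired to a
# queue with pika's blocking consumer, and how a downstream consumer could
# unpack a published message. A sketch under assumptions: the host, the
# queue name 'frames', and the auto_ack policy are placeholders, not taken
# from this project; only callback_on_message and pysmile are from the code
# above.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='frames')
channel.basic_consume(queue='frames',
                      on_message_callback=callback_on_message,
                      auto_ack=True)
channel.start_consuming()

# A consumer of rmq_completed_exchange would reverse the serialization:
# decode the SMILE payload back to a dict, then base64-decode the crop.
def handle_face_message(body):
    message = pysmile.decode(body)
    face_bmp = base64.b64decode(message['face'])  # raw BMP bytes of the crop
    return message, face_bmp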