def instagram():
    results = []
    api = InstagramAPI(client_id=app.config['CLIENT_ID'],
                       client_secret=app.config['CLIENT_SECRET'])
    data = json.loads(request.data.decode())
    lat = data["lat"]
    lng = data["lng"]
    dist = data["dist"]
    min_tstmp = data["min_timestamp"]
    your_location = api.media_search(count=100, lat=lat, lng=lng,
                                     distance=dist, min_timestamp=min_tstmp)
    for media in your_location:
        url = media.images['standard_resolution'].url
        pid = media.id
        img_paths = detect_faces(url, pid)
        if img_paths:
            for img_path in img_paths:
                results.append(img_path)
    results = json.dumps(results)
    print("****** RESULTS ******")
    print(" ")
    print(results)
    return results
def _face_bb_full_img_calculator(img, face_frontal_cascade, face_profile_cascade):
    """
    Detects faces in the image and returns the biggest bounding box.

    :param img: input image
    :param face_frontal_cascade: Haar cascade for frontal faces
    :param face_profile_cascade: Haar cascade for profile faces
    :return: the biggest bounding box as [x, y, w, h]
    """
    rect_faces, rect_faces_frontal, rect_faces_profile = detect_faces(
        img, face_frontal_cascade, face_profile_cascade)

    # sort list in descending order of area
    rect_faces.sort(key=lambda rect: rect.area(), reverse=True)

    if len(rect_faces) > 0:
        bbox = [rect_faces[0].x, rect_faces[0].y, rect_faces[0].w, rect_faces[0].h]
    else:
        bbox = [0, 0, 0, 0]
    return np.asarray(bbox)
def start(self):
    self.capture = True
    self.fps = 0
    self.cap = cv2.VideoCapture(0)
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    self.timer = set_interval(self.flush_frames, 1)
    while self.capture:
        ret, img = self.cap.read()
        self.fps += 1
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        detected_faces = face_detection.detect_faces(gray)
        for (x, y, w, h) in detected_faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow('video', img)
        cv2.waitKey(1)
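# NOTE: set_interval() above is used but never defined in these snippets. The
# following is a minimal, hypothetical sketch built on threading.Timer; the
# name and exact behaviour are assumptions, not the original helper.
import threading

def set_interval(func, sec):
    """Call func every `sec` seconds on a background daemon timer; returns the timer."""
    def wrapper():
        set_interval(func, sec)  # schedule the next tick before running the callback
        func()
    timer = threading.Timer(sec, wrapper)
    timer.daemon = True
    timer.start()
    return timer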
def _face_bb_quarter_imgs_calculator(img, face_frontal_cascade, face_profile_cascade):
    """
    Splits the image into 4 sub-images. For each sub-image it detects faces,
    chooses the face with the biggest bounding box and stores that box
    (x, y, w, h) as a feature.

    How the image is split:
     _________
    |    |    |
    |__1_|__2_|
    |    |    |
    |__3_|_4__|

    :param img: input image
    :param face_frontal_cascade: Haar cascade for frontal faces
    :param face_profile_cascade: Haar cascade for profile faces
    :return: the biggest bounding boxes (one per sub-image)
    """
    # split image into 4 quarter images
    height, width, channels = img.shape
    subimgs = [img[0:int(height / 2), 0:int(width / 2)],
               img[0:int(height / 2), int(width / 2):width],
               img[int(height / 2):height, 0:int(width / 2)],
               img[int(height / 2):height, int(width / 2):width]]

    bboxes = []
    for subimg in subimgs:
        rect_faces, rect_faces_frontal, rect_faces_profile = detect_faces(
            subimg, face_frontal_cascade, face_profile_cascade)
        # sort list in descending order of area
        rect_faces.sort(key=lambda rect: rect.area(), reverse=True)
        if len(rect_faces) > 0:
            bboxes.extend([rect_faces[0].x, rect_faces[0].y,
                           rect_faces[0].w, rect_faces[0].h])
        else:
            bboxes.extend([0, 0, 0, 0])
    return np.asarray(bboxes)
def _face_count_calculator(img, face_frontal_cascade=None, face_profile_cascade=None):
    """Returns the number of faces detected in the image."""
    directory_haarfeatures = os.path.join(os.getcwd(), 'res', 'haarcascades')
    if face_frontal_cascade is None:
        face_frontal_cascade = cv2.CascadeClassifier(
            os.path.join(directory_haarfeatures, 'haarcascade_frontalface_default.xml'))
    if face_profile_cascade is None:
        face_profile_cascade = cv2.CascadeClassifier(
            os.path.join(directory_haarfeatures, 'haarcascade_profileface.xml'))
    rect_faces, rect_faces_frontal, rect_faces_profile = detect_faces(
        img, face_frontal_cascade, face_profile_cascade)
    return len(rect_faces)
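# NOTE: the three cascade-based snippets above call a detect_faces(img,
# face_frontal_cascade, face_profile_cascade) helper that is not shown in this
# collection. As a point of reference, here is a minimal, hypothetical sketch
# of such a helper built on OpenCV's CascadeClassifier.detectMultiScale; the
# Rect wrapper and the exact return signature are assumptions, not the
# original implementation.
import cv2

class Rect:
    """Simple bounding-box wrapper matching the rect.x/y/w/h and rect.area() usage above (assumed)."""
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def area(self):
        return self.w * self.h

def detect_faces(img, face_frontal_cascade, face_profile_cascade):
    # hypothetical sketch: run both cascades on a grayscale copy of the image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    frontal = face_frontal_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    profile = face_profile_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    rect_frontal = [Rect(*r) for r in frontal]
    rect_profile = [Rect(*r) for r in profile]
    return rect_frontal + rect_profile, rect_frontal, rect_profile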
def detect_faces_for_user(username):
    user = User.objects.get(username=username)
    for photo in user.photos.all():
        image_path = photo.image.path
        valid_faces = detect_faces(image_path)
        for face in valid_faces:
            face_image_path = make_face_images(image_path, face)
            Face.objects.get_or_create(
                user=user,
                photo=photo,
                image=face_image_path
            )
def is_face_present():
    # Decode the posted base64 image
    data_uri = request.get_json(silent=True)['img']
    img_np = base64_to_np(data_uri)

    # Detect faces and try to identify the person
    faces = detect_faces(img_np)
    identity = get_face_identity(img_np)

    return {
        "facePresent": identity is not None or len(faces) > 0,
        "personId": identity
    }
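# NOTE: base64_to_np() above is not defined in these snippets. A plausible
# sketch that decodes a base64/data-URI string into an OpenCV image follows;
# the exact input format handled by the original helper is an assumption.
import base64
import cv2
import numpy as np

def base64_to_np(data_uri):
    """Decode a base64 string (optionally 'data:image/...;base64,' prefixed) into a BGR image."""
    encoded = data_uri.split(',')[-1]           # strip any data-URI header
    raw = base64.b64decode(encoded)             # base64 -> raw bytes
    buf = np.frombuffer(raw, dtype=np.uint8)    # bytes -> flat uint8 array
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)  # decode JPEG/PNG bytes -> image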
def upload():
    # Path where the uploaded image will be saved
    target = os.path.join(APP_ROOT, 'static/')
    # Timestamp used to distinguish each uploaded image
    t = time.time()
    for upload in request.files.getlist("file"):
        filename = str(t) + "upload.jpg"            # Filename of the uploaded image
        destination = "/".join([target, filename])  # Destination path of the image
        upload.save(destination)                    # Save the image in the 'static' directory

        image = os.path.join(APP_ROOT, 'static/' + filename)  # Path of the original image
        detect_faces(image, filename)  # Detect the face in the uploaded image
        # Path of the grayscale face image written by detect_faces()
        image_path = os.path.join(APP_ROOT, 'static/' + filename + 'face.jpg')

        # Check whether a face was detected
        if os.path.exists(image_path):
            # Use the trained model to predict the emotion probabilities
            result = predict_face(image_path)
            # Render 'show.html' to display the results, passing the filename
            # and the probabilities to the template
            return render_template('show.html', filename=filename,
                                   anger=result[0][0], contempt=result[1][0],
                                   disgust=result[2][0], fear=result[3][0],
                                   happy=result[4][0], neutral=result[5][0],
                                   sadness=result[6][0], surprise=result[7][0])
        else:
            # Render 'notshow.html' to show an error message
            return render_template('notshow.html')
def complete(request):
    code = request.GET['code']
    access_token = api.exchange_code_for_access_token(code)
    auth_api = InstagramAPI(access_token=access_token[0])

    user, _ = User.objects.get_or_create(
        username=access_token[1].get('username')
    )
    # Don't repeat this!
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    # authenticate(username=user.username, password=user.password)
    login(request, user)

    # Move to worker
    recent_media, _ = auth_api.user_recent_media(count=0)
    for media in recent_media:
        photo, created = Photo.objects.get_or_create(
            user=user,
            instagram_id=media.id
        )
        if created:
            img_temp = NamedTemporaryFile(delete=True)
            with img_temp:
                # Use a distinct name so the Django `request` argument is not shadowed
                response = requests.get(
                    media.images['standard_resolution'].url,
                    stream=True
                )
                for block in response.iter_content(1024):
                    if not block:
                        break
                    img_temp.write(block)
                img_temp.flush()
                photo.image.save(
                    '{}.jpg'.format(photo.instagram_id),
                    File(img_temp)
                )
            image_path = photo.image.path
            valid_faces = detect_faces(image_path)
            for face in valid_faces:
                face_image_path = make_face_images(image_path, face)
                with open(face_image_path):
                    face = Face.objects.create(
                        user=user,
                        photo=photo,
                    )
                    face.image.name = face_image_path
                    face.save()
    return redirect('choose')
def get_score(player_id):
    # calculate score
    input_file = photo_dir + '/' + str(player_id) + '.jpg'
    output_file = base_dir + '/' + str(player_id) + '.jpg'
    score = detect_faces(input_file, output_file)

    # save score
    with open(score_dir + '/' + str(player_id), 'w') as f:
        f.write(str(score))

    # load player info
    player_info = None
    with open(player_info_dir + '/' + '%d.txt' % player_id, 'r') as f:
        # drop the trailing newline
        player_info = f.read()[:-1]

    # save result
    with open(result_file, 'a') as f:
        print(player_info + '%05d' % score, file=f)

    return str(score)
def display_frame(self, image_data):
    """
    Paints a bounding box around detected faces in the camera feed and
    displays this frame.

    Receives image data, passes it to the face_detection model, then uses
    the returned results to paint a bounding box around detected faces.
    Finally, adjusts the widget size to match the size of the image.

    Args:
        image_data: Image data from the camera.
    """
    detected_faces = detect_faces(image_data)

    for (bounding_box_start, bounding_box_end) in detected_faces:
        cv2.rectangle(image_data, bounding_box_start, bounding_box_end,
                      self.__bounding_box_colour, self.__bounding_box_thickness)

    self.image = self.get_frame(image_data)
    if self.image.size() != self.size():
        self.setFixedSize(self.image.size())
    self.update()
def start(self):
    self.capture = True
    self.fps = 0
    self.cap = cv2.VideoCapture(0)
    self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
    self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
    self.timer = set_interval(self.flush_frames, 1)
    while self.capture:
        ret, img = self.cap.read()
        self.fps += 1
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        rgb_array = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(rgb_array)
        detected_faces = face_detection.detect_faces(image)
        for face in detected_faces:
            bounding_box = face.bounding_box.flatten().astype("int")
            (x1, y1, x2, y2) = bounding_box
            width = x2 - x1
            height = y2 - y1
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            roi_gray = gray[y1:y2, x1:x2]
            roi_color = img[y1:y2, x1:x2]
        cv2.imshow('video', img)
        cv2.waitKey(1)
detection_timer = PeriodicTimer(1, enable_detection)
target = Rectangle((0, 0), (300, 300), BLUE)
face_rectangle = NO_FACE
face_counter = 0
gate_is_closed = True

with Camera() as camera:
    detection_timer.start()
    target.center_point = camera.size // 2
    while True:
        frame = camera.frame
        if can_dectect:
            faces = detect_faces(frame)
            can_dectect = False
            if faces:
                face_rectangle = faces[0]
        if (face_rectangle.position > target.position
                and face_rectangle.end_point < target.end_point):
            face_counter += 1
            face_rectangle.color = GREEN
            if face_counter == 3 and gate_is_closed:
                # crop the detected face region out of the frame
                face = frame[face_rectangle.position.x:face_rectangle.position.x + face_rectangle.size.x,
                             face_rectangle.position.y:face_rectangle.position.y + face_rectangle.size.y]
# Prepare the true image obtained during onboarding
true_img = cv2.imread('true_img.png', 0)     # Load the image from file in grayscale
true_img = true_img.astype('float32') / 255  # Cast to float32 and scale to [0, 1]
true_img = cv2.resize(true_img, (92, 112))   # Reduce the number of pixels in the image
true_img = true_img.reshape(1, true_img.shape[0], true_img.shape[1], 1)  # Reshape to (1, rows, cols, 1)

video_capture = cv2.VideoCapture(0)   # Returns video from the first webcam of the computer
preds = collections.deque(maxlen=15)  # Deque: list-like container with fast appends and pops on both ends

while True:
    # Capture frames from the webcam
    _, frame = video_capture.read()  # Returns the image as an array

    # Detect faces
    frame, face_img, face_coords = face_detection.detect_faces(frame, draw_box=False)

    if face_img is not None:
        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)  # Convert the image to grayscale
        face_img = face_img.astype('float32') / 255            # Cast to float32 and scale to [0, 1]
        face_img = cv2.resize(face_img, (92, 112))              # Reduce the number of pixels in the image
        face_img = face_img.reshape(1, face_img.shape[0], face_img.shape[1], 1)  # Reshape to (1, rows, cols, 1)
        preds.append(1 - model.predict([true_img, face_img])[0][0])
        x, y, w, h = face_coords  # Coordinates of the face in the image

        if len(preds) == 15 and sum(preds) / 15 >= 0.3:
            text = "Identity: {}".format(name)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 5)
        elif len(preds) < 15:
            text = "Identifying ..."
import math            # To perform mathematical functions
import helper          # Module containing a few helper functions
import cv2             # OpenCV, used for capture, display and saving
import face_detection  # Module to detect a face in an image

video_capture = cv2.VideoCapture(0)  # Returns video from the first webcam of the computer
counter = 20

name = input("Enter your name : ")
text_file = open("name.txt", "w")
n = text_file.write(name)
text_file.close()

while True:
    _, frame = video_capture.read()  # Returns the image as an array
    # Call the function to detect a face in the image
    frame, face_box, face_coords = face_detection.detect_faces(frame)
    # Display the remaining seconds until the image is taken
    text = 'Image will be taken in {} seconds.'.format(math.ceil(counter))
    if face_box is not None:
        # Draw the countdown text just above the detected face
        frame = helper.write_on_frame(frame, text, face_coords[0], face_coords[1] - 10)
    cv2.imshow('Video', frame)  # Display the image in a window
    cv2.waitKey(1)              # Wait for one millisecond
    counter -= 0.1
    if counter <= 0:
        cv2.imwrite('true_img.png', face_box)  # Save the face image in the current directory
        break
import cv2
import math
import utils
import face_detection

video_capture = cv2.VideoCapture(0)
counter = 5

while True:
    _, frame = video_capture.read()
    frame, face_box, face_coords = face_detection.detect_faces(frame)
    text = 'Image will be taken in {}..'.format(math.ceil(counter))
    if face_box is not None:
        frame = utils.write_on_frame(frame, text, face_coords[0], face_coords[1] - 10)
    cv2.imshow('Video', frame)
    cv2.waitKey(1)
    counter -= 0.1
    if counter <= 0:
        cv2.imwrite('true_img.png', face_box)
        break

# Release the capture once the photo has been taken
video_capture.release()
cv2.destroyAllWindows()
print("Onboarding Image Captured")
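# NOTE: helper.write_on_frame / utils.write_on_frame used in the two onboarding
# snippets above are not shown here. A minimal sketch using cv2.putText follows;
# the signature is inferred from the call sites and is an assumption.
import cv2

def write_on_frame(frame, text, x, y):
    """Draw white text with a thin black outline at (x, y) and return the frame."""
    cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (0, 0, 0), 3, cv2.LINE_AA)        # outline
    cv2.putText(frame, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (255, 255, 255), 1, cv2.LINE_AA)   # fill
    return frame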
IMAGES_FOLDER = Path.cwd().parent.joinpath("images")
images = [image for image in IMAGES_FOLDER.glob("*.png")]

for image in images:
    image_name = image.name
    PIL_image = Image.open(image)
    img = numpy.array(PIL_image)
    start = time.time()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detected_faces = face_detection.detect_faces(PIL_image)
    for face in detected_faces:
        bounding_box = face.bounding_box.flatten().astype("int")
        (x1, y1, x2, y2) = bounding_box
        width = x2 - x1
        height = y2 - y1
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        roi_gray = gray[y1:y2, x1:x2]
        roi_color = img[y1:y2, x1:x2]
    end = time.time()
IMAGES_FOLDER = Path.cwd().parent.joinpath("images")
images = [image for image in IMAGES_FOLDER.glob("*.png")]

for image in images:
    image_name = image.name
    PIL_image = Image.open(image).convert('RGB')
    img = numpy.array(PIL_image)
    start = time.time()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detected_faces = face_detection.detect_faces(gray)
    for (x, y, w, h) in detected_faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    end = time.time()
    print(image_name, end - start)

    cv2.imshow(image_name, img)
    while True:
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC closes the preview window
            break
    cv2.destroyAllWindows()