Example #1
def video():
    # Requires `import cv2` and `from fer import FER`
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

    # Create the detector once, outside the capture loop
    detector = FER()

    while True:
        ret, frame = cap.read()

        if ret:
            # frame = cv2.flip(frame, 0)
            # out.write(frame)
            # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            print(detector.top_emotion(frame))

            # cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()
Example #2
def upload():
    target = os.path.join(APP_ROOT, 'images/')
    print(target)
    if not os.path.isdir(target):
        try:
            os.mkdir(target)
        except OSError:
            print("Couldn't create upload directory: {}".format(target))
        #print(request.files.getlist("file"))
    for upload in request.files.getlist("file"):
        print(upload)
        #print("{} is the file name".format(upload.filename))

        filename = upload.filename
        destination = os.path.join(target, filename)
        upload.save(destination)

    folder = 'images'
    ex = folder + '/' + filename
    img = plt.imread(ex)
    detector = FER(mtcnn=True)
    emotion, score = detector.top_emotion(img)
    ans = a[emotion]  # `a` is a module-level emotion-to-text mapping

    return render_template("complete_display_image.html",
                           image_name=ex,
                           text=ans)
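
Example #2 looks up `ans = a[emotion]` in a mapping defined elsewhere in its source project. A minimal sketch of what that mapping might look like, assuming it maps each of fer's seven emotion labels to a display string (the messages here are hypothetical):

# Hypothetical emotion-to-message mapping assumed by upload() above
a = {
    'angry': 'You look angry!',
    'disgust': 'You look disgusted!',
    'fear': 'You look scared!',
    'happy': 'You look happy!',
    'sad': 'You look sad!',
    'surprise': 'You look surprised!',
    'neutral': 'You look neutral.',
}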
Example #3
    def run(self):
        # Capture from webcam
        width = 400  # width of the captured video frame, in pixels
        height = 300  # height of the captured video frame, in pixels
        video_capture_device = cv2.VideoCapture(0)
        video_capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        video_capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        fps = 0  # initialize the fps counter to 0
        detector = FER()  # initialize facial expression recognition

        while True:
            startTime = time.time()  # start time for the fps calculation

            # At roughly 20 fps, 30 frames give a time difference of a
            # little more than a second.
            frameCounter = 30  # number of frames per fps measurement

            for _ in range(frameCounter):
                if self.isInterruptionRequested():
                    video_capture_device.release()
                    return
                ret, frame = video_capture_device.read()
                if ret:
                    self.new_frame_signal.emit(frame)

                    # When no face is detected, the emotions list is empty,
                    # so detector.top_emotion() raises an IndexError. The
                    # try branch runs when a face is detected; the except
                    # branch runs when no face is found.
                    try:
                        # Get the top emotion and score from the video frame
                        emotion, score = detector.top_emotion(frame)
                        # Output the magnitude and type of emotion to the GUI
                        UI.emotionMagLabel.setText("Score: " + str(score))
                        UI.emotionTypeLabel.setText("Emotion: " + emotion)
                    except IndexError:  # no face detected
                        # Score and emotion are unavailable without a face
                        UI.emotionMagLabel.setText("Score N/A")
                        UI.emotionTypeLabel.setText("Emotion N/A")

                    UI.outputFPS.setText("Frames Per Second: " + str(fps))

            stopTime = time.time()  # stop time for the fps calculation
            # Calculate the current fps, rounded to 3 decimal places
            fps = round(frameCounter / (stopTime - startTime), 3)
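
The comments in Example #3 describe how fer behaves when no face is in frame: older versions raise an IndexError from top_emotion(), and newer ones may return (None, None) instead. A small defensive wrapper covering both cases, written as a sketch rather than taken from any example above:

def safe_top_emotion(detector, frame):
    # Return (emotion, score), or (None, None) when no face is found
    try:
        emotion, score = detector.top_emotion(frame)
    except IndexError:
        return None, None
    return emotion, score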
Example #4
class EmotionDetection():
    def __init__(self):
        self.detector = FER()

    def detect(self):
        self.img = cv2.imread("image/image.jpg")
        self.detector.detect_emotions(self.img)
        emotion, score = self.detector.top_emotion(self.img)
        print("The emotion of the person:", emotion)
Example #5
def emotion(impath):
    try:
        print(impath)
        image = cv2.imread(impath)
        detector = FER()
        images = detector.detect_emotions(image)
        if not images:  # None or an empty list: no face found
            return None
        emot = str(detector.top_emotion(image)[0])
        return emot
    except IndexError:
        return None
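
A quick usage sketch for the helper above, assuming 'photo.jpg' (a hypothetical path) is an image containing a single face:

result = emotion('photo.jpg')  # e.g. 'happy', or None if no face was found
if result is not None:
    print('Detected emotion:', result)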
Example #6
    def findFace(self, i):
        flag = False

        # create the directory for photos if it does not exist
        # (path configured in loginInfo.py)
        if not os.path.exists(savePicturesDirectory):
            os.makedirs(savePicturesDirectory)

        # take a screenshot
        self.driver.get_screenshot_as_file('screenshot' + str(i) + '.png')
        img.append(cv2.imread('screenshot' + str(i) + '.png'))

        face_cascade = cv2.CascadeClassifier(faceLibraryPath)

        # crop part of the image so we do not search for faces in photos
        # that have already been matched
        crop_img.append(img[i][100:650, 1000:1300])
        # save the cropped image
        cv2.imwrite("cropp" + str(i) + ".png", crop_img[i])
        # search for faces
        img1.append(cv2.imread('cropp' + str(i) + '.png'))
        photoGray.append(cv2.cvtColor(img1[i], cv2.COLOR_BGR2GRAY))

        faces.append(
            face_cascade.detectMultiScale(photoGray[i],
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE))

        if len(faces[i]) == 0:
            print("no face detected")
            # if no face is found in the photo, save it to the folder
            cv2.imwrite(savePicturesDirectory + str(i) + ".png", img1[i])
        else:
            # a face was found, so detect the emotion
            detector = FER()
            emotion, score = detector.top_emotion(img1[i])
            print(emotion, score)

            if emotion in ('angry', 'sad') and score > 0.9:
                print('too angry for me')
                cv2.imwrite(savePicturesDirectory + str(i) + ".png", img1[i])
            else:
                print('Perfect')
                flag = True
        return flag
Example #7
def camstream(camera):
    # `camera` here is the pygame.camera module; the name is rebound to a
    # Camera instance below. QUIT, KEYDOWN and K_s come from pygame.locals.
    camera.init()
    DEVICE = 0
    SIZE = (640, 480)
    FILENAME = 'capture.png'
    display = pygame.display.set_mode(SIZE, 0)
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    while capture:
        # photo = camera.get_image(screen)
        display.blit(screen, (0, 0))
        pygame.display.flip()
        for event in pygame.event.get():
            if event.type == QUIT:
                capture = False
            elif event.type == KEYDOWN and event.key == K_s:
                photo = camera.get_image(screen)
                pygame.image.save(
                    photo,
                    "E://Projects//Pygame experiment//images_without_bg//img.jpg"
                )
                img = plt.imread(
                    "E://Projects//Pygame experiment//images_without_bg//img.jpg"
                )

                detector = FER()

                print("Started processing..")
                try:
                    print(detector.top_emotion(img))
                    print("Ended processing, result :)")
                except Exception:
                    print("Couldn't generate emotion analysis :(")
                # plt.imshow(img)

                # image = pygame.image.save(screen, FILENAME)
                # pygame.image.save(photo, "E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # img = plt.imread("E://Projects//Pygame experiment//images_without_bg//img.jpg")
                # detector = FER(mtcnn=True)
                # print(detector.detect_emotions("E://Projects//Pygame experiment//images_without_bg//img.jpg"))
                # plt.imshow(img)

    camera.stop()
    # pygame.quit()
    return
Example #8
    def post(self, request, format=None):

        serializer = ImageUploadSerializer(data=request.data)
        if serializer.is_valid():
            try:
                image = serializer.validated_data['image'].file.read()
                npimg = np.frombuffer(image, dtype=np.uint8)  # np.fromstring is deprecated
                img = cv2.imdecode(npimg, 1)
                detector = FER()
                emotion = detector.top_emotion(img)[0]
                res = Image.objects.all().filter(
                    emotion=emotion.upper())[0].link
                return Response(res, status=status.HTTP_200_OK)
            except IndexError:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(serializer.errors,
                        status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
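
Example #8 filters an `Image` model by an `emotion` field and returns its `link`. A minimal sketch of the model this view appears to assume (field names inferred from the query; field lengths are hypothetical):

from django.db import models

class Image(models.Model):
    # Emotion label stored in upper case, e.g. 'HAPPY'
    emotion = models.CharField(max_length=20)
    # URL of an image associated with that emotion
    link = models.URLField()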
Example #9
class EmoBotAgent(AngleInterpolationAgent):
    def __init__(self, simspark_ip='localhost',
                 simspark_port=3100,
                 teamname='DAInamite',
                 player_id=0,
                 sync_mode=True):
        super(EmoBotAgent, self).__init__(simspark_ip, simspark_port, teamname, player_id, sync_mode)
        
        self.isAngry = False
        self.cap = cv2.VideoCapture(0)

        # Check if the webcam is opened correctly
        if not self.cap.isOpened():
            raise IOError("Cannot open webcam")

        self.detector = FER()
        
    def think(self, perception):
        if not self.is_animating():
            ret, frame = self.cap.read()
            frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)

            emotion, score = self.detector.top_emotion(frame)
            print(emotion, score)
            
            if self.isAngry and (emotion != 'angry' or score is None or score < 0.6):
                print("not angry")
                self.start_animation(handsDown())
                self.isAngry = False

            elif emotion == 'angry' and score > 0.6 and not self.isAngry:
                print("angry")
                self.start_animation(handsUp())
                self.isAngry = True

            elif emotion == 'happy' and score > 0.8:
                print("wave")
                self.start_animation(hello())
            
            elif emotion == 'sad' and score > 0.6:
                print("sad")
                self.start_animation(sad())

        return super(EmoBotAgent, self).think(perception)
Example #10
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # create the detector once, outside the frame loop
    detector = FER()

    while True:
        # read the next frame from the video stream and resize it
        # (imutils.resize keeps the aspect ratio; only width is needed)
        frame = vs.read()
        frame = imutils.resize(frame, width=600)

        # grab the current timestamp
        timestamp = datetime.datetime.now()
        try:
            emotion, score = detector.top_emotion(frame)
        except Exception:
            emotion, score = "finding...", 0
        cv2.putText(frame, emotion, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        with lock:
            outputFrame = frame.copy()
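
Example #10 writes each annotated frame into a global outputFrame guarded by a lock, the usual pattern for streaming frames from a background thread to a Flask route. A sketch of the serving side under that assumption (the generator name is hypothetical):

def generate():
    # Yield the latest annotated frame as an MJPEG stream
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                continue
            flag, encoded = cv2.imencode(".jpg", outputFrame)
            if not flag:
                continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' +
               bytearray(encoded) + b'\r\n')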
Example #11
def hello_world():
    # Return the emotion detected in an uploaded image,
    # without saving the file to storage
    top = request.form.get('top')

    # Check that the form data contains an image
    if 'image' not in request.files:
        return {'message': 'No File Part'}
    image = request.files['image']

    # Check that the image has a filename
    if image.filename == '':
        return {'message': 'No Selected File'}

    # Check that the image exists and its file type is allowed
    if image and allowed_file(image.filename):
        # Read the image as bytes
        image_string = image.read()
        # Convert the bytes to a numpy array (fromstring is deprecated)
        np_img = numpy.frombuffer(image_string, numpy.uint8)
        # Decode the array into an OpenCV image
        img = cv2.imdecode(np_img, cv2.IMREAD_UNCHANGED)

        # Set up the detector
        detector = FER()

        # top == '1': return only the strongest emotion
        if top == '1':
            emotion, score = detector.top_emotion(img)
            data = {'emotion': emotion, 'score': float(score)}
        # otherwise: return the scores for every emotion
        else:
            result = detector.detect_emotions(img)
            data = pd.DataFrame(result).to_json()

        return data

    return 'Hello, World!'
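
For reference when reading Examples #11 and #20: fer's detect_emotions returns one dict per detected face, holding a bounding box and a score per emotion, roughly of this shape (the values here are made up):

# Illustrative return value of detector.detect_emotions(img)
[{'box': [110, 65, 150, 150],
  'emotions': {'angry': 0.01, 'disgust': 0.0, 'fear': 0.02,
               'happy': 0.83, 'sad': 0.01, 'surprise': 0.05,
               'neutral': 0.08}}]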
Example #12
def getMoviethroughEmotion():
    global clicked

    def back(*args):
        global clicked
        clicked = True

    cap = cv2.VideoCapture(0)

    def mouse_click(event, x, y, flags, param):
        global clicked
        if event == cv2.EVENT_LBUTTONDOWN:
            print("clicked")
            clicked = True

    clicked = False
    # create the window once so the mouse callback only has to be set once
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", mouse_click)
    while True:
        ret, image = cap.read()

        if not ret:
            break

        cv2.imshow("image", image)

        if clicked:
            break

        k = cv2.waitKey(1)
        if k == ord("q"):
            break

    detector = FER()
    emotion = detector.top_emotion(image)
    print(emotion)
    cv2.destroyAllWindows()
    cap.release()
    return emotion
Example #13
# -*- coding: utf-8 -*-
"""Untitled0.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1yA-cb7JozrTN3GaMLwhWENbCnqeB2YC_
"""

from fer import FER
import cv2

img = cv2.imread("/content/pic22.jpg")
detector = FER()
detector.detect_emotions(img)
emotion, score = detector.top_emotion(img)
score
emotion

from pyagender import PyAgender
agender = PyAgender()
faces = agender.detect_genders_ages(cv2.imread("/content/pic22.jpg"))
faces
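
Examples #13 and #18 both pair fer with PyAgender. detect_genders_ages returns one dict per detected face; as used here, 'gender' is a score where values below 0.5 are read as male, and 'age' is a float estimate. A rough illustration (values made up, face-box fields omitted):

# Illustrative return value of agender.detect_genders_ages(img)
[{'gender': 0.31,  # below 0.5 is treated as male in Example #18
  'age': 27.4}]    # plus bounding-box fields for the detected face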
Example #14
from fer import FER
import cv2
import sys

sys.stdout.write('loading model...\n')
sys.stdout.flush()
detector = FER()
sys.stdout.write('loading model success !\n')
sys.stdout.flush()


while True:
    try:
        #print('ready')
        sys.stdout.write('ready\n')
        sys.stdout.flush()
        signal = sys.stdin.readline().strip()  # expect 'go'
        if signal == 'go':
            emotion, score = detector.top_emotion(cv2.imread('out.jpg'))
            sys.stdout.write(emotion + '\n')
            sys.stdout.flush()
        #print(emotion)
    except Exception:
        # no face detected; report it so the parent still gets a reply
        sys.stdout.write('no face\n')
        sys.stdout.flush()
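
The loop above implements a tiny line-based protocol over stdin/stdout: the worker prints 'ready', waits for 'go', then prints an emotion label. A sketch of the parent side, assuming the script above is saved as fer_worker.py (a hypothetical filename):

import subprocess

proc = subprocess.Popen(['python', 'fer_worker.py'],
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE,
                        text=True)
proc.stdout.readline()            # 'loading model...'
proc.stdout.readline()            # 'loading model success !'
proc.stdout.readline()            # 'ready'
proc.stdin.write('go\n')          # ask the worker to analyze out.jpg
proc.stdin.flush()
print(proc.stdout.readline().strip())  # e.g. 'happy' or 'no face'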
    
Example #15
import cv2
from fer import FER
import matplotlib.pyplot as plt

# Load the cascade
face_cascade = cv2.CascadeClassifier(
    'C:\\Users\\atharva\\Documents\\codes\\facedetection-master\\facedetection-master\\haarcascade_frontalface_default.xml'
)
camera = cv2.VideoCapture(0)

return_value, image = camera.read()
img = image

detector = FER()
f = detector.top_emotion(img)
print(f)
# Read the input image
#img = cv2.imread('C:\\Users\\atharva\\Documents\\codes\\facedetection-master\\facedetection-master\\test.jpg')

# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)

# Draw rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
window_name = 'img'

Example #16
from fer import FER
import cv2
cap = cv2.VideoCapture(0)

# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")

detector = FER()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    cv2.imshow('Input', frame)

    emotion, score = detector.top_emotion(frame)
    print(emotion, score)

    c = cv2.waitKey(1)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example #17
detector = FER()  # create the detector once, outside the loop

while cv.waitKey(1) < 0:
    t = time.time()
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameFace, bboxes = getFaceBox(fNet, frame)
    if not bboxes:
        print("No face detected, checking next frame")
        continue

    for bbox in bboxes:
        face = frame[max(0, bbox[1] - padding):min(bbox[3] + padding, frame.shape[0] - 1),
                     max(0, bbox[0] - padding):min(bbox[2] + padding, frame.shape[1] - 1)]
        blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        gNet.setInput(blob)
        genderPreds = gNet.forward()
        gender = genderList[genderPreds[0].argmax()]
        print("Gender : {}, conf = {:.3f}".format(gender, genderPreds[0].max()))
        aNet.setInput(blob)
        agePreds = aNet.forward()
        age = ageList[agePreds[0].argmax()]
        print("Age : {}, conf = {:.3f}".format(age, agePreds[0].max()))
        emotion = detector.top_emotion(face)
        print(emotion[0])
        label = "{},{},{}".format(gender, age, emotion[0])
        cv.putText(frameFace, label, (bbox[0], bbox[1] - 10),
                   cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2, cv.LINE_AA)
        cv.imshow("Prediction", frameFace)
    print("time : {:.3f}".format(time.time() - t))
    
Example #18
face_cascade = cv2.CascadeClassifier(
    "/content/haarcascade_frontalface_default.xml")
img = cv2.imread("/content/sample.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img,
                                      scaleFactor=1.05,
                                      minNeighbors=5)

for x, y, w, h in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)

cv2_imshow(img)
cv2.waitKey(0)
cv2.destroyAllWindows()

agender = PyAgender()
faces = agender.detect_genders_ages(img)
gender = faces[0]['gender']
age = int(faces[0]['age'])
if gender < 0.5:  # PyAgender gender scores below 0.5 indicate male
    print('G_Male')
else:
    print('G_Female')

print('age =', age)

detector = FER()
result = detector.top_emotion(img)
print('Emotion :', result[0])
Example #19
    # Variables for deciding placement of text in images
    pos_x, pos_y = (450, 184)
    capture = True
    print("Loop start")
    detector = FER()
    counter = 0
    while capture:
        counter += 1
        print(counter)
        pygame.time.Clock().tick(10)
        try:
            ret, frame = cam.read()
            print(detector.detect_emotions(frame))
            # print(detector.top_emotion(frame))
            emotion = detector.top_emotion(frame)[0]
            print("Done :)")
            screen.fill(bg_color)

            if emotion == 'happy':
                # For image
                image_display(emotion)

            elif emotion == 'neutral':
                # For image
                image_display(emotion)

            elif emotion == 'sad':
                # For image
                image_display(emotion)

        except IndexError:
            # no face detected in this frame; keep capturing
            print("No face detected")
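
image_display in Example #19 is an external helper. A minimal sketch of what it might do, assuming pygame and per-emotion image files named after the labels (the paths are hypothetical):

def image_display(emotion):
    # Load the image that matches the emotion and blit it to the screen
    picture = pygame.image.load('images/' + emotion + '.png')
    screen.blit(picture, (pos_x, pos_y))
    pygame.display.update()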
Example #20
def main():
	"""Análisis de sentimientos"""

	st.title("Aplicación de aprendizaje automático")
	st.text("Análisis de sentimiento en fotos y videos")

	activites = ["Imagen", "Video", "Créditos"]

	choice = st.sidebar.selectbox("Seleccione", activites)

	if choice == 'Imagen':
		st.subheader("Análisis de imagen")
		uploaded_file = st.file_uploader("Subir imagen",type=["png","jpg"])
		if uploaded_file is not None:

			file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
			opencv_image = cv2.imdecode(file_bytes, 1)
			st.image(opencv_image, channels="BGR", use_column_width=True)
			detector = FER()
			#detector = FER(mtcnn=True)
			result = detector.detect_emotions(opencv_image)
			bounding_box = result[0]["box"]
			emotions = result[0]["emotions"]
			result_df=DataFrame(result)
			result_dic=result_df.loc[: , "emotions"][0]
			result_dic_df=pd.Series(result_dic).to_frame(name="Score")

			my_bar = st.progress(0)

			for percent_complete in range(100):
				time.sleep(0.1)
				my_bar.progress(percent_complete + 1)

			cv2.rectangle(
				opencv_image,
				(bounding_box[0], bounding_box[1]),
				(bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),
				(0, 155, 255),
				2,
			)
			for idx, (emotion, score) in enumerate(emotions.items()):
				color = (211, 211, 211) if score < 0.01 else (0, 255, 0)
				emotion_score = "{}: {}".format(
					emotion, "{:.2f}".format(score) if score > 0.01 else ""
				)
				cv2.putText(
					opencv_image,
					emotion_score,
					(bounding_box[0], bounding_box[1] + bounding_box[3] + 30 + idx * 15),
					cv2.FONT_HERSHEY_SIMPLEX,
					0.5,
					color,
					1,
					cv2.LINE_AA,
				)
			cv2.imwrite("imagen_result.jpg", opencv_image)
			st.subheader('Imagen analizada')
			st.image(opencv_image, channels="BGR", use_column_width=True)
			st.markdown(get_binary_file_downloader_html("imagen_result.jpg", 'Imagen'), unsafe_allow_html=True)

			emocion, puntaje = detector.top_emotion(opencv_image) 
			st.write("Emoción predominante: ", emocion)
			st.write("Puntaje: ", puntaje)

			st.markdown("---")
			st.subheader('Tabla de resultados')
			st.table(result_dic_df)
			st.subheader('Gráfica')
			st.line_chart(result_dic_df)



	elif choice == 'Video':
		st.subheader("Análisis de video")
		uploaded_video = st.file_uploader("Subir video en .mp4",type=["mp4"])

		if uploaded_video is not None:
			video_bytes = np.asarray(bytearray(uploaded_video.read()), dtype=np.uint8)
			st.video(video_bytes)
			#video_path = st.text_input('CSV file path')

			temporary_location = "video_result.mp4"
			# write the uploaded bytes to a temporary file
			# (the with-block closes the file automatically)
			with open(temporary_location, 'wb') as out:
				out.write(video_bytes)
			video = Video("video_result.mp4")
			# Analyze video, displaying the output
			detector = FER()
			raw_data = video.analyze(detector, display=True)
			
			
			# Progress bar
			my_bar = st.progress(0)
			for percent_complete in range(100):
				time.sleep(0.1)
				my_bar.progress(percent_complete + 1)
			st.markdown("---")
			st.subheader('Tabla de resultados')			
			# Results
			df = video.to_pandas(raw_data)
			st.table(df.head())
			# Plot emotions
			medias=df[['angry','disgust','fear','happy','sad','surprise','neutral']].mean()
			st.write(medias)
			st.bar_chart(medias)
			st.subheader('Gráfica')	
			st.line_chart(df[['angry','disgust','fear','happy','sad','surprise','neutral']])
	
	elif choice == 'Créditos':
		st.subheader("Créditos")
		st.text("Jorge O. Cifuentes")
		st.write('*[email protected]* :sunglasses:')
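
get_binary_file_downloader_html in Example #20 is a helper that is not shown; versions of it circulating in Streamlit forums base64-encode the file into an HTML download link. A sketch under that assumption:

import base64

def get_binary_file_downloader_html(bin_file, file_label='File'):
    # Encode the file and wrap it in a download link for st.markdown
    with open(bin_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    return '<a href="data:application/octet-stream;base64,{}" download="{}">Download {}</a>'.format(
        b64, bin_file, file_label)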
Example #21
# manual interface.
#client.loop_forever()

detector = FER()

cam = PiCamera()
#cam.start_preview()

# load the Haar cascade once, outside the capture loop
faceCascade = cv.CascadeClassifier(cv.data.haarcascades +
                                   "haarcascade_frontalface_default.xml")

while True:
    sleep(1)
    cam.capture('/home/pi/Desktop/EE522/Final/selfie.jpg')
    selfie = cv.imread('/home/pi/Desktop/EE522/Final/selfie.jpg')
    sleep(1)
    #cam.stop_preview()
    #detector.detect_emotions(selfie)
    gray = cv.cvtColor(selfie, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.3,
                                         minNeighbors=3,
                                         minSize=(30, 30))

    #print("Found {0} Faces!".format(len(faces)))
    if len(faces) > 0:
        emotion, score = detector.top_emotion(selfie)
        print(emotion)
        client.publish("data/emotion", emotion)  #publish
    else:
        print("Face not found.")