Esempio n. 1
0
 def test_batch_fer_pil_image(self):
     """Batch FER: a list containing one PIL image yields a list of result dicts."""
     test_data = [
         Image.open(os.path.normpath(os.path.join(DIR, "data/fear.png")))
     ]
     response = fer(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 2
0
    def test_bad_fer(self):
        """A wrongly-sized (56x56) face grid still returns all six emotion keys."""
        fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
        test_face = np.linspace(0,50,56*56).reshape(56,56).tolist()
        response = fer(test_face)

        self.assertTrue(isinstance(response, dict))
        self.assertEqual(fer_set, set(response.keys()))
Esempio n. 3
0
 def test_batch_fer_good_b64(self):
     """Batch FER accepts base64-encoded PNG data and returns a list of dicts."""
     test_data = [
         "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAg5JREFUeNrEV4uNgzAMpegGyAgZgQ3KBscIjMAGx03QEdqbgG5AOwG3AWwAnSCXqLZkuUkwhfYsvaLm5xc7sZ1dIhdtUVjsLZRFTvp+LSaLq8UZ/s+KMSbZCcY5RV9E4QQKHG7QtgeCGv4PFt8WpzkCcztu3TiL0eJgkQmsVFn0MK+LzYkRKEGpG1GDyZdKRdaolhAoJewXnJsO1jtKCFDlChZAFxyJj2PnBRU20KZg7oMlOAENijpi8hwmGkKkZW2GzONtVLA/DxHAhTO2I7MCVBSQ6nGDlEBJDhyVYiUBHXBxzQm0wE4FzPYsGs856dA9SAAP2oENzFYqR6iAFQpHIAUzO/nxnOgthF/lM3w/3U8KYXTwxG/1IgIulF+wPQUXDMl75UoJZIHstRWpaGb8IGYqwBoKlG/lgpzoUEBoj50p8QtVrmHgaaXyC/H3BFC+e9kGFlCB0CtBF7FifQ8D9zjQQHj0pdOM3F1pUBoFKdxtqkMClScHJCSDlSxhHSNRT5K+FaZnHglrz+AGoxZLKNLYH6s3CkkuyJlp58wviZ4PuSCWDXl5hmjZtxcSCGbDUD3gK7EMOZBLCETrgVBF5K0lI5bIZ0wfrYh8NWHIAiNTPHpuTOKpCes1VTFaiNaFdGwPfdmaqlj6LmjJbgoSSfUW74K3voz+/W0oIeB7HWu2s+dfx3N+eLX8CTAAwUmKjK/dHS4AAAAASUVORK5CYII="
     ]
     response = fer(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 4
0
    def test_bad_fer(self):
        """A 64x64 image file (non-standard size) still returns all six emotion keys."""
        fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
        test_face = os.path.normpath(os.path.join(DIR, "data/64by64.png"))
        response = fer(test_face)

        self.assertTrue(isinstance(response, dict))
        self.assertEqual(fer_set, set(response.keys()))
Esempio n. 5
0
    def test_good_fer(self):
        """fer() on a random 48x48 grayscale array yields every emotion key."""
        face = np.random.rand(48, 48).tolist()
        result = fer(face)

        self.assertIsInstance(result, dict)
        expected = {'Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'}
        self.assertEqual(expected, set(result.keys()))
Esempio n. 6
0
    def test_int_array_fer(self):
        """FER on a 48x48 PNG file path returns a dict with all six emotion keys."""
        fer_set = set(["Angry", "Sad", "Neutral", "Surprise", "Fear", "Happy"])
        test_face = os.path.normpath(os.path.join(DIR, "data/48by48.png"))
        response = fer(test_face)

        self.assertTrue(isinstance(response, dict))
        self.assertEqual(fer_set, set(response.keys()))
Esempio n. 7
0
def urlsToImages(imgUrls):
    """Annotate images with emoji overlays based on indico face-emotion results.

    For each URL/path in imgUrls, fetches FER data (with face detection),
    smoothens the face data across the sequence, and pastes emojis onto each
    opened image. Prints progress along the way.

    Returns:
        The list of modified PIL images.
    """
    denom = len(imgUrls)
    imgInfos = []
    i = 0  # NOTE(review): redundant — `i` is rebound by enumerate below

    for i, url in enumerate(imgUrls):
        try:
            imgInfo = indicoio.fer(url, detect=True)
            imgInfos.append(imgInfo)
            i += 1  # shift to 1-based for the progress display
            print('%d / %d' % (i, denom))

        except indicoio.utils.errors.IndicoError:
            # API-side failure: record an empty result so alignment is kept.
            imgInfos.append({})
            i += 1
            print('%d / %d' % (i, denom))

        except requests.exceptions.ConnectionError:
            # NOTE(review): nothing is appended here, so imgInfos can fall out
            # of step with imgUrls in the zip below — confirm intended.
            pass

    print('Smoothening...')
    imgs = list(map(Image.open, imgUrls))
    smoothenFaces(imgInfos, imgs[0].size[0], imgs[0].size[1])
    # with Image.open(imgUrls[0]) as first:
    #   smoothenFaces(imgInfos, first.size[0], first.size[1])

    print('Adding emojis...')
    for img, imgInfo in zip(imgs, imgInfos):
        for faceInfo in imgInfo:
            pasteEmojis_effectful(img, faceInfo)

    return imgs
Esempio n. 8
0
 def test_happy_fer_pil(self):
     test_face = Image.open(
         os.path.normpath(os.path.join(DIR, "data/happy.png"))
     ).convert("L")
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response["Happy"] > 0.5)
Esempio n. 9
0
def indico():
    """Flask endpoint: check the player's emotion guess for a photo.

    Reads gameCount/emotion/photoUrl from the POST form, runs indico FER on
    the photo URL, buckets the emotions into happy/sad/fear groups, records
    the playthrough result in the database, and returns JSON feedback plus
    the top emotion score.
    """
    # NOTE(review): API key hard-coded in source — move to configuration.
    indicoio.config.api_key = "3e19af4454ebe0932333aff84913d88d"
    gameCount = request.form["gameCount"]  # NOTE(review): read but never used
    emotion = request.form["emotion"]
    photoUrl = request.form["photoUrl"]

    result = {}

    # Emotion scores of the first detected face.
    emotions = indicoio.fer(photoUrl, detect=True, sensitivity=0.4)[0]['emotions']

    # Bucket raw emotions into three coarse groups.
    highestVals = {}
    highestVals["happyVal"] = emotions["Happy"] + emotions["Neutral"]
    highestVals["sadVal"] = emotions["Sad"] + emotions["Angry"]
    highestVals["fearVal"] = emotions["Fear"] + emotions["Surprise"]

    # NOTE(review): .iteritems() is Python 2 only; use .items() on Python 3.
    highestValsKey = max(highestVals.iteritems(), key=operator.itemgetter(1))[0]
    # NOTE(review): max() yields a single (key, value) pair, so the loop below
    # iterates over that pair (key string, then its float) — confirm intended.
    highestKeys = max(emotions.iteritems(), key=operator.itemgetter(1))

    for highestKey in highestKeys:
        # NOTE(review): `and` binds tighter than `or`, so the first clause
        # parses as Happy or (Neutral and happyVal) — confirm the grouping.
        if (highestKey == "Happy" or highestKey == "Neutral" and highestValsKey == "happyVal") or (highestKey == "Sad" or highestKey == "Angry" and highestValsKey == "sadVal") or (highestKey == "Fear" or highestKey == "Surprise" and highestValsKey == "fearVal"):
            if highestKey == emotion:
                result["feedback"] = "You are correct!"
                g.db.execute('insert into playthrough (score,playerid,gameid) values (?,?,?)',[1, session['userid'], session["game_id"]])
            else:
                result["feedback"] = "Not quite - try again!"
                g.db.execute('insert into playthrough (score,playerid,gameid) values (?,?,?)',[0, session['userid'], session["game_id"]])
            g.db.commit()
            result[highestKey] = emotions[highestKey]
            return jsonify(result)

    # Fallback: report the top emotion's score without feedback.
    highestKey = highestKeys[0]
    result[highestKey] = emotions[highestKey]
    return jsonify(result)
Esempio n. 10
0
def gifUrlToFrames(url):
    """Split an animated GIF into emoji-annotated RGB frames.

    Seeks through the GIF frame by frame, trims each frame to even
    dimensions, runs indico face-emotion detection, and pastes emojis over
    detected faces. Frames are kept even when the FER call errors.

    Returns:
        list: the processed PIL frames, once the GIF is exhausted.
    """
    gif = Image.open(url)
    imgs = []
    i = 0

    try:
        while 1:
            gif.seek(i)
            frame = gif.copy()

            # Force even width/height (video encoders require even sizes).
            w, h = frame.size
            if w % 2: w -= 1
            if h % 2: h -= 1

            frame = frame.crop((0, 0, w, h)).convert('RGB')

            try:
                imgInfo = indicoio.fer(numpy.array(frame), detect=True)
                [ pasteEmojis_effectful(frame, faceInfo) for faceInfo in imgInfo ]
                imgs.append(frame)
                i += 1
            except indicoio.utils.errors.IndicoError:
                # Keep the unannotated frame so the frame count is preserved.
                imgs.append(frame)
                i += 1
            except requests.exceptions.ConnectionError:
                # Transient network failure: retry this same frame index.
                pass

    except EOFError:
        # seek() past the last frame signals the end of the GIF.
        return imgs
Esempio n. 11
0
    def test_good_fer(self):
        """A random 48x48 grayscale array returns a dict with all six emotions."""
        fer_set = set(['Angry', 'Sad', 'Neutral', 'Surprise', 'Fear', 'Happy'])
        test_face = np.random.rand(48,48).tolist()
        response = fer(test_face)

        self.assertTrue(isinstance(response, dict))
        self.assertEqual(fer_set, set(response.keys()))
Esempio n. 12
0
def home(request):
    """Render the home page: tag the newest photo's emotion and suggest videos.

    Runs indico FER on the most recent HomeImages photo, stores the dominant
    emotion on the model, then gathers YouTube results for related (but
    different) moods from the lookup table.
    """
    image = HomeImages.objects.latest("datetime")

    # Dominant emotion of the most recent photo.
    scores = indicoio.fer(image.image.path)
    image.emotion = max(scores.items(), key=operator.itemgetter(1))[0]
    image.save()

    emotion_list = lookup_table[image.emotion]
    total = len(emotion_list)
    youtubVideos = []

    # Up to 10 rounds of 3 videos each, always for a mood other than the
    # detected one.
    for _ in range(min(total, 10)):
        pick = emotion_list[random.randrange(0, total)]
        while pick == image.emotion:
            pick = emotion_list[random.randrange(0, total)]
        youtubVideos += youTubeSearch(pick, 3)

    return render(request, "home/home.html",
                  {"image": image, "youtubVideos": youtubVideos})
Esempio n. 13
0
def gifUrlToFrames(url):
    """Decompose an animated GIF into RGB frames with emoji overlays.

    Seeks through the GIF frame by frame, trims each frame to even
    width/height, runs indico face-emotion detection on it, and pastes an
    emoji over every detected face. Frames are kept even when the FER call
    fails. Returns the list of processed PIL frames when the GIF ends.
    """
    gif = Image.open(url)
    frames = []
    index = 0

    try:
        while True:
            gif.seek(index)
            frame = gif.copy()

            # Crop to even dimensions (video encoders require even sizes).
            width, height = frame.size
            width -= width % 2
            height -= height % 2
            frame = frame.crop((0, 0, width, height)).convert('RGB')

            try:
                face_infos = indicoio.fer(numpy.array(frame), detect=True)
                for face_info in face_infos:
                    pasteEmojis_effectful(frame, face_info)
                frames.append(frame)
                index += 1
            except indicoio.utils.errors.IndicoError:
                frames.append(frame)
                index += 1
            except requests.exceptions.ConnectionError:
                # Transient network failure: retry this same frame index.
                pass

    except EOFError:
        # Seeking past the last frame ends the GIF.
        return frames
Esempio n. 14
0
def urlsToImages(imgUrls):
    """Open every image, smoothen detected-face data, and paste emoji overlays.

    Runs indico FER (with face detection) on each path/URL, printing a
    progress counter. API errors record an empty result; connection errors
    record nothing. Returns the list of annotated PIL images.
    """
    denom = str(len(imgUrls))
    infos = []

    for count, url in enumerate(imgUrls, start=1):
        try:
            infos.append(indicoio.fer(url, detect=True))
            print(str(count) + '/' + denom)
        except indicoio.utils.errors.IndicoError:
            infos.append({})
            print(str(count) + '/' + denom)
        except requests.exceptions.ConnectionError:
            pass

    print('Smoothening...')
    imgs = [Image.open(url) for url in imgUrls]
    smoothenFaces(infos, imgs[0].size[0], imgs[0].size[1])

    print('Adding emojis...')
    for img, info in zip(imgs, infos):
        for face in info:
            pasteEmojis_effectful(img, face)

    return imgs
 def analyze_profile_pict(self):
     """Fetch profile-picture emotion scores via indico FER and add them to the plot.

     Raises:
         PersonAnalyzerException: if the indico API call fails.
     """
     try:
         self.profile_pict_stats = Factor(
             indicoio.fer(self.person.get_profile_pict()).items(),
             'Profile picture emotions stats')
         self.plotter.add_factor(self.profile_pict_stats)
     except IndicoError:
         raise PersonAnalyzerException(
             'Error while fetching data from indicoio')
Esempio n. 16
0
def findEmotion(thePath):
    """Return the name of the strongest emotion detected in the image at thePath."""
    scores = indicoio.fer(thePath)
    # Pick the emotion whose probability is highest (use min for the weakest).
    return max(scores, key=scores.get)
def image_analysis(filepath):
    """Return indico facial-emotion scores for the image stored at filepath."""
    # NOTE(review): API key hard-coded in source — move to configuration.
    indicoio.config.api_key = '428b1c1039ed8d8eaa886ee88044debd'
    pixels = np.array(Image.open(filepath))
    return indicoio.fer(pixels)
def score():
    """Score every photo in each department folder with facial-emotion recognition.

    For each department acronym, runs fer() over every file in the matching
    photo directory and collects the per-photo results.

    Returns:
        list[list]: per-department lists of fer() results (bug fix: the
        original built this list but never returned it).
    """
    list_of_acronyms = ['ANT', 'ARC', 'ART', 'AST', 'CBE', 'CHM', 'CEE', 'CLA', 'COM', 'COS', 'EAS', 'EEB', 'ECO', 'ELE', 'ENG', 'FRE', 'GEO', 'GER', 'HIS', 'MAT', 'MAE', 'MOL', 'MUS', 'NES', 'ORF', 'PHI', 'PHY', 'POL', 'PSY', 'REL', 'SLA', 'SOC', 'SPA', 'WWS']
    dept_scores = []
    for department in list_of_acronyms:
        # NOTE(review): hard-coded local path — parameterize for reuse.
        directory = "/Users/Valerie/Documents/HackPrinceton/" + department
        photos = [f for f in listdir(directory) if isfile(join(directory, f))]
        this_dept_scores = []
        for photo in photos:
            path = directory + "/" + photo
            # Nested-list pixel payload, as the fer API expects.
            pixel_array = skimage.io.imread(path).tolist()
            this_dept_scores.append(fer(pixel_array))
        dept_scores.append(this_dept_scores)
    return dept_scores
	def loadRecentPosts(self,recent_tags, api):
		"""Analyze recent tagged posts: caption sentiment plus image emotion.

		For each tag, looks up the posting user's stats, classifies the
		caption sentiment into positive/neutral/negative buckets (updating
		the plot coordinates and radii), and appends the image's indico FER
		scores to self.imageSentiment.
		NOTE(review): Python 2 code — uses print statements throughout.
		"""
		for tag in recent_tags:
			#split the string returned to get users id
			temp, new_id = tag.id.split('_')
			user = api.user(new_id)

			#gets amount of posts user has made
			postCount = user.counts['media']
			#gets the amount of followers the user has
			followers = user.counts['followed_by']
			#gets the amount of people the user is following
			following = user.counts['follows']
			#gets the number of likes of the post
			likes = tag.like_count

			print 'Post Number:', self.numPosts
			print likes, 'likes'
			print "Users Number of Posts:", postCount
			print "Followers:", followers
			print "Following:", following

			# Checks each word in caption to see if it is positive, neutral or negative and
			# puts it into a list then calculates its radius based on number of followers
			if tag.caption is not None:
				print(tag.caption.text)
				sentiment = indicoio.sentiment_hq(tag.caption.text)
				if sentiment >= 0.66:
					self.positivePosts+=1
					self.positiveY.append(sentiment*100)
					self.positiveX.append(self.numPosts%(MAX_COUNT/3))
					self.positiveRadius = self.calculateRadius(self.positiveRadius,followers)
				elif sentiment >= 0.33:
					self.neutralPosts+=1
					self.neutralY.append(sentiment*100)
					self.neutralX.append(self.numPosts%(MAX_COUNT/3))
					self.neutralRadius = self.calculateRadius(self.neutralRadius,followers)
				else:
					self.negativePosts+=1
					self.negativeY.append(sentiment*100)
					self.negativeX.append(self.numPosts%(MAX_COUNT/3))
					self.negativeRadius = self.calculateRadius(self.negativeRadius,followers)

			#Use Indico API to calculate image sentiment
			imageUrl = tag.images['low_resolution'].url
			self.imageSentiment.append(indicoio.fer(imageUrl))

			print # separate each post with a new line
			self.numPosts+=1
Esempio n. 20
0
def img_handler():
    """
    - Grabs the binary data from the WebRTC still grab attached to POST
    - WebRTC is intuitive--unicode or ASCII byte-encodings not-so-much.
      Manipulates the unicode that Python gets from the POST request form dictionary
      and turns it into the appropriate ASCII byte-encoding, which is then base-64-decoded,
      and then piped into a random UUID-named .png file.
    - As a hack, I used sshfs to mount the public_html directory of my UT Austin CS account address
      into my working dir, and sent the new .png files into that folder, chmodding per upload.
    - This renders the image into a resource that's easily accessible by APIs.
      (Although this obviously won't scale, I only have 2 GB as an undergrad.)
    - Finally, sends the URL via POST to the Microsoft Emotions API
    - tl;dr I changed an image data-uri to a publicly available URL so it'd play better with some
      ML libraries that didn't have native Python clients, but did have RESTful APIs.
    """
    # Strip the "data:image/png;base64," data-URI prefix (22 chars) and coerce
    # the remaining payload to bytes before base64-decoding.
    data = request.form.get('stillIn')
    data = data[22:].encode('latin-1')
    binary_data = a2b_base64(data)
    session['uuid'] = str(uuid4())
    fn = session['uuid'] + ".png"
    with open('./models/mount/{}'.format(fn), 'wb') as fd:
        fd.write(binary_data)
    # Make the uploaded file world-readable on the sshfs-mounted web dir.
    subprocess.call("chmod 755 ./models/mount/{}".format(fn),
                    shell=True)
    resource = "http://cs.utexas.edu/~rainier/{}".format(fn)
    print json.dumps({'url': resource})  # NOTE(review): Python 2 print statement

    # msft request
    try:
        msft_url = "https://api.projectoxford.ai/emotion/v1.0/recognize"
        headers = {'Ocp-Apim-Subscription-Key': config['MSFT_EMOTION_KEY'],
                   'Content-Type': 'application/json'}
        msft_req = requests.post(url=msft_url, data=json.dumps({'url': resource}), headers=headers)
        print "msft {}".format(msft_req.json())
    # NOTE(review): bare except hides every failure (network, KeyError, bugs),
    # not just "no face detected" — narrow the exception type.
    except:
        flash('No face was detected!')
        return redirect('/', messages=get_flashed_messages())
    session['msft'] = msft_parse(msft_req.json())

    # indicoio request
    session['indico'] = indicoio.fer(resource)

    # clarifai request
    clarifai_api = ClarifaiApi()
    clarifai_req = clarifai_api.tag_image_urls(resource)
    session['clarifai'] = clarifai_parse(clarifai_req)

    return redirect('/results')
Esempio n. 21
0
def captureImage(request):
    """Decode a base64 PNG posted from the browser, store it, and tag its emotion.

    Expects a POST field 'image' holding a data-URI (data:image/png;base64,...).
    Saves the decoded PNG under MEDIA_ROOT/img/person/, attaches it to a new
    HomeImages row, runs indico facial-emotion recognition on it, and stores
    the dominant emotion on the model. Always responds with an empty body.
    """
    image = request.POST.get('image', None)
    # Keep only the base64 payload after the data-URI prefix.
    image = image.split('data:image/png;base64,')[1]
    imageName = str(uuid.uuid1()) + "out.png"
    imagePath = MEDIA_ROOT + "/img/person/" + imageName
    # Bug fixes: write in binary mode (text-mode "w" corrupts PNG bytes on
    # Python 3) and use b64decode (decodestring is Python-2-only, removed in
    # Python 3.9); context managers guarantee the handles are closed.
    with open(imagePath, "wb") as out:
        out.write(base64.b64decode(image))
    img = HomeImages.objects.create()
    # Read back in binary mode as well — PNG data is not text.
    with open(imagePath, 'rb') as fh:
        img.image.save(imageName, File(fh))
    data = indicoio.fer(imagePath)
    # Keep only the highest-scoring emotion label.
    data = max(data.items(), key=operator.itemgetter(1))[0]
    img.emotion = data
    img.save()
    return HttpResponse("")
Esempio n. 22
0
def score():
    """Run facial-emotion recognition over every department's photo folder.

    For each department acronym, scores every file in its photo directory
    with fer() and collects the per-photo results.

    Returns:
        list[list]: per-department lists of fer() result dicts (bug fix: the
        original computed this but never returned it).
    """
    list_of_acronyms = [
        'ANT', 'ARC', 'ART', 'AST', 'CBE', 'CHM', 'CEE', 'CLA', 'COM', 'COS',
        'EAS', 'EEB', 'ECO', 'ELE', 'ENG', 'FRE', 'GEO', 'GER', 'HIS', 'MAT',
        'MAE', 'MOL', 'MUS', 'NES', 'ORF', 'PHI', 'PHY', 'POL', 'PSY', 'REL',
        'SLA', 'SOC', 'SPA', 'WWS'
    ]
    dept_scores = []
    for department in list_of_acronyms:
        # NOTE(review): hard-coded local path — parameterize for reuse.
        directory = "/Users/Valerie/Documents/HackPrinceton/" + department
        photos = [f for f in listdir(directory) if isfile(join(directory, f))]
        this_dept_scores = []
        for photo in photos:
            path = directory + "/" + photo
            # Nested-list pixel payload, as the fer API expects.
            pixel_array = skimage.io.imread(path).tolist()
            this_dept_scores.append(fer(pixel_array))
        dept_scores.append(this_dept_scores)
    return dept_scores
Esempio n. 23
0
def calculateSentimate(path):
    """Return indico facial-emotion scores for the image at `path`.

    Also prints the scores for debugging. Bug fix: the original invoked the
    remote fer endpoint twice (once to print, once to return) — doubling
    latency and API usage; it is now called exactly once.
    """
    # NOTE(review): API key hard-coded in source — move to configuration.
    indicoio.config.api_key = '1329497cc5ba31312cd9f0777ffb4bad'
    result = indicoio.fer(path)
    print(result)
    return result
def get_user_prediction(fit):
	"""Build a feature vector from the user's recent Instagram posts and print
	like-count predictions for each hour of the day.

	Fetches follower/following counts, then for the (count-1)-th recent media
	item assembles features: caption sentiment, hour, follows, FER emotion
	scores of the image, hashtag count, weekday, followers, and ratios.
	Feeds the vector to ``fit.predict`` once per hour 0-23 and prints results.
	NOTE(review): Python 2 code (print statements, xrange).
	"""
	url = 'https://api.instagram.com/v1/users/self/?access_token=%s' % access_token
	resp = requests.get(url=url)
	data = resp.json()
	followers = data['data']['counts']['followed_by']
	follows = data['data']['counts']['follows']
	day = 0
	hour_float = 0.0
	image_url = ''
	new_post = []
	count = 4
	recent_media, next = api.user_recent_media(user_id='self', count=count)
	for i, media in enumerate(recent_media):
		new_post = []
		image_url = media.get_standard_resolution_url()

		day = media.created_time.weekday()
		hour = str(media.created_time.hour) + ':' + str(media.created_time.minute)
		likes = media.like_count
		hashtags = len(media.tags)

		if i == count-1:
			captionSentiment = 0.5
			if media.caption != None:
				caption = media.caption.text.replace('\n', ' ').replace('\r', ' ').encode('utf-8')
				captionSentiment = indicoio.sentiment(caption)
			# NOTE(review): local `fer` shadows any imported fer() function.
			fer = indicoio.fer(image_url)

			# NOTE(review): hour_float stays a string here — confirm intended.
			new_hour, minute = hour.split(':')
			hour_float = new_hour

			new_post.append(captionSentiment)
			new_post.append(hour_float)
			new_post.append(follows)
			new_post.append(fer['Angry'])
			new_post.append(hashtags)
			new_post.append(day)
			new_post.append(fer['Neutral'])
			new_post.append(followers)
			new_post.append(fer['Surprise'])
			if follows > 0:
				new_post.append(float(followers)/follows)
			else:
				new_post.append(float(followers))
			new_post.append(fer['Sad'])
			new_post.append(fer['Fear'])
			new_post.append(fer['Happy'])

			# NOTE(review): `target` is computed but never used afterwards.
			if followers > 0:
				target = float(likes) / followers
			else:
				target = float(likes)

	# NOTE(review): `hour`/`likes` are unbound if no media was returned, and
	# new_post stays empty unless the i == count-1 branch ran — verify inputs.
	print image_url
	print day
	print hour
	for i in xrange(0,24):
		new_post[1] = i
		predict = fit.predict(new_post)
		prediction = predict[0]*followers
		print "Predicted:", int(prediction)
		print "Actual:", likes
		print
		print
Esempio n. 25
0
def get_emotions(face):
    """Run facial-emotion recognition on `face` via the indico API.

    `face` may be anything fer() accepts (e.g. a nested pixel list).
    """
    return fer(face, api_key=INDICO_API_KEY)
Esempio n. 26
0
 def test_batch_fer_pil_image(self):
     """Batch FER: a single PIL image in a list yields a list with one dict."""
     test_data = [Image.open(os.path.normpath(os.path.join(DIR, "data/fear.png")))]
     response = fer(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 27
0
 def test_batch_fer_good_b64(self):
     """Batch FER accepts a base64-encoded PNG and returns a list of dicts."""
     test_data = ["iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAg5JREFUeNrEV4uNgzAMpegGyAgZgQ3KBscIjMAGx03QEdqbgG5AOwG3AWwAnSCXqLZkuUkwhfYsvaLm5xc7sZ1dIhdtUVjsLZRFTvp+LSaLq8UZ/s+KMSbZCcY5RV9E4QQKHG7QtgeCGv4PFt8WpzkCcztu3TiL0eJgkQmsVFn0MK+LzYkRKEGpG1GDyZdKRdaolhAoJewXnJsO1jtKCFDlChZAFxyJj2PnBRU20KZg7oMlOAENijpi8hwmGkKkZW2GzONtVLA/DxHAhTO2I7MCVBSQ6nGDlEBJDhyVYiUBHXBxzQm0wE4FzPYsGs856dA9SAAP2oENzFYqR6iAFQpHIAUzO/nxnOgthF/lM3w/3U8KYXTwxG/1IgIulF+wPQUXDMl75UoJZIHstRWpaGb8IGYqwBoKlG/lgpzoUEBoj50p8QtVrmHgaaXyC/H3BFC+e9kGFlCB0CtBF7FifQ8D9zjQQHj0pdOM3F1pUBoFKdxtqkMClScHJCSDlSxhHSNRT5K+FaZnHglrz+AGoxZLKNLYH6s3CkkuyJlp58wviZ4PuSCWDXl5hmjZtxcSCGbDUD3gK7EMOZBLCETrgVBF5K0lI5bIZ0wfrYh8NWHIAiNTPHpuTOKpCes1VTFaiNaFdGwPfdmaqlj6LmjJbgoSSfUW74K3voz+/W0oIeB7HWu2s+dfx3N+eLX8CTAAwUmKjK/dHS4AAAAASUVORK5CYII="]
     response = fer(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 28
0
def get_test():
    """Flask endpoint: map a posted image URI to a suggested action via FER."""
    payload = request.get_json(force=True)
    emotion_scores = indicoio.fer(payload['uri'])
    action = evaluate_emotions(emotion_scores)
    return jsonify({'action': action})
Esempio n. 29
0
                        text += "n"
                text += " " + second_color["class"][:-5] + " " + second_word["class"]
                if third_word["score"] != 0:
                    text += " or a"
                    if third_word["class"][0].lower() in "aeiou":
                        text += "n"
                    text += " " + third_word["class"]
                text += "."
            print "\nGenerated Text:", text
            subprocess.call(["pico2wave", "-w", "description.wav",text])
            subprocess.call(["aplay", "description.wav"])
            #time.sleep(0.1)
        else:
            cam.capture("face.jpg")
            #out = ino.image_recognition("image.jpg", top_n=5)
            emotions = ino.fer("face.jpg")
            #print(emotions)
            emotes = sorted(emotions, key=lambda k: -emotions[k])[:3]
            semotes = []
            for emote in emotes:
                if emote == "Sad": semotes.append("sad")
                elif emote == "Fear": semotes.append("afraid")
                elif emote == "Angry": semotes.append("mad")
                elif emote == "Neutral": semotes.append("neutral")
                elif emote == "Happy": semotes.append("happy")
                else: semotes.append("surprised")
            text = "The person is " + semotes[0] + ", " + semotes[1] + ", and " + semotes[2] + "."
            print text
            subprocess.call(["pico2wave", "-w", "emotion.wav",text])
            subprocess.call(["aplay", "emotion.wav"])
Esempio n. 30
0
 def test_fear_fer(self):
     """A fearful face loaded as a grayscale array scores Fear above 0.25."""
     test_face = self.load_image("../data/fear.png", as_grey=True)
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response['Fear'] > 0.25)
Esempio n. 31
0
 def test_batch_fer_filepath(self):
     """Batch FER: a list with one file path yields a list with one dict."""
     test_data = [os.path.normpath(os.path.join(DIR, "data/fear.png"))]
     response = fer(test_data)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 32
0
def capture():
    """Grab a webcam frame, crop detected faces, and accumulate emotion scores.

    Saves the frame and each cropped face to disk, runs indico facial
    localization then per-face emotion recognition, sums the scores, and
    writes the totals to results.txt. The module-level counter `foo` names
    the files uniquely and is incremented on success.
    """
    try:
        global foo
        base_dir = "/home/lx_user/Documents/programming/web/uanimate"
        test_dir = base_dir + "/test"
        frame_name = "test" + str(foo) + ".jpg"

        img = cam.get_image()
        pygame.image.save(img, frame_name)
        os.rename(base_dir + "/" + frame_name, test_dir + "/" + frame_name)
        # Renamed from `dict`/`file`, which shadowed the builtins.
        faces = indicoio.facial_localization(test_dir + "/" + frame_name)
        image = pygame.image.load(test_dir + "/" + frame_name)
        c_id = 0
        # fear is disgust
        happy = 0.0
        sad = 0.0
        angry = 0.0
        fear = 0.0
        surprise = 0.0
        neutral = 0.0
        total = 0.0

        for face in faces:
            # Skip implausible bounding boxes (absurd size or position).
            if (face['bottom_right_corner'][0] -
                    face['top_left_corner'][0] > 60000
                    or face['bottom_right_corner'][1] -
                    face['top_left_corner'][1] > 60000
                    or face['top_left_corner'][0] > 60000
                    or face['top_left_corner'][1] > 60000):
                continue
            crop_rect = (face['top_left_corner'][0],
                         face['top_left_corner'][1],
                         abs(face['bottom_right_corner'][0] -
                             face['top_left_corner'][0]),
                         abs(face['bottom_right_corner'][1] -
                             face['top_left_corner'][1]))
            cropped = image.subsurface(crop_rect)
            crop_name = str(foo) + "cropped" + str(c_id) + ".jpg"
            pygame.image.save(cropped, crop_name)

            # Emotion scores for this face.
            results = indicoio.fer(crop_name)

            happy += results.get('Happy', 0.0)
            sad += results.get('Sad', 0.0)
            angry += results.get('Angry', 0.0)
            fear += results.get('Fear', 0.0)
            surprise += results.get('Surprise', 0.0)
            # NOTE(review): preserved from the original — this re-adds the
            # running sums on every face, so `total` grows super-linearly;
            # confirm that is intended.
            total += (happy + sad + angry + fear + surprise)

            os.rename(base_dir + "/" + crop_name,
                      test_dir + "/" + crop_name)
            c_id += 1

        # Write the aggregate scores; `with` guarantees the file is closed
        # (the original leaked the handle and shadowed the `file` builtin).
        with open(base_dir + "/results.txt", 'w') as out:
            out.write(
                str(happy) + " " + str(sad) + " " + str(angry) + " " +
                str(fear) + " " + str(surprise) + " " + str(total))
        foo += 1
    except (RuntimeError, TypeError, NameError, ValueError, IndexError,
            IOError, KeyError):
        print("wtf")
Esempio n. 33
0
import indicoio

# NOTE(review): API key hard-coded in source — move to configuration.
indicoio.config.api_key = '1329497cc5ba31312cd9f0777ffb4bad'
# Bug fix: the original line was `filepath='` — an unterminated string
# literal (SyntaxError). Point it at the image you want analyzed.
filepath = 'path/to/image.png'
print(indicoio.fer(filepath))
import indicoio
import sys
import os

# NOTE(review): API key left empty — must be filled in before running.
indicoio.config.api_key = ''



# Batch-label the CK+ face crops: run indico FER on every image listed in
# label_crop_face.lst and append the per-expression scores to the result file.
source_image_directory = '../CK+'
source_image_lst = '../CK+/label_crop_face.lst'
result_file = 'ck+_indicoio.lst'

expression_list = ['Angry','Sad','Neutral','Surprise','Fear','Happy']
fo = open(result_file,'a')  # append mode so reruns extend an existing list
fi = open(source_image_lst,'r')
lines = fi.readlines()

for l in lines:

	arr = l.split()
	file_path = os.path.join(source_image_directory,arr[0])
	# NOTE(review): Python 2 print statement; also `l` is the raw line text,
	# not an index, so "str(l) of N" prints the line itself.
	print 'doing '+file_path+' '+str(l)+' of '+str(len(lines))
	result = indicoio.fer(file_path)
	fo.write(file_path+'\n')
	for e in expression_list:
		fo.write(e+':'+str(result[e])+' ')
	fo.write('\n')

fo.close()

Esempio n. 35
0
import indicoio
import skimage.io
# NOTE(review): API key hard-coded in source — move to configuration.
indicoio.config.api_key = "5daca1bb2437922364bfae1c7d733109"
import os
path = os.path.abspath('kevin.png')
# Load the image as a nested list of pixel values for the API payload.
pixel_array = skimage.io.imread(path).tolist()
from indicoio import fer, batch_fer, image_features
print(fer(pixel_array))
#print(image_features(pixel_array))
Esempio n. 36
0
 def test_happy_fer_pil(self):
     """A grayscale PIL image of a happy face scores Happy above 0.5."""
     test_face = Image.open(os.path.normpath(os.path.join(DIR, "data/happy.png"))).convert('L');
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response['Happy'] > 0.5)
Esempio n. 37
0
 def test_happy_fer(self):
     """A happy face loaded as a grayscale array scores Happy above 0.5."""
     test_face = self.load_image("../data/happy.png", as_grey=True)
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response['Happy'] > 0.5)
Esempio n. 38
0
import indicoio

# NOTE(review): placeholder — replace with a real API key before running.
indicoio.config.api_key = 'your api key'

# Python 2 print statement: dumps the emotion scores for a local image.
print indicoio.fer("/home/itamar/PycharmProjects/facedetection/im2.jpg")
Esempio n. 39
0
 def test_happy_fer(self):
     """A known happy face image scores Happy above 0.5."""
     image_path = os.path.normpath(os.path.join(DIR, "data/happy.png"))
     result = fer(image_path)
     self.assertIsInstance(result, dict)
     self.assertGreater(result['Happy'], 0.5)
Esempio n. 40
0
 def test_fear_fer(self):
     """A fearful face loaded as a grayscale array scores Fear above 0.25."""
     test_face = self.load_image("data/fear.png", as_grey=True)
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response['Fear'] > 0.25)
Esempio n. 41
0
 def test_fear_fer(self):
     """A known fearful face image scores Fear above 0.25."""
     image_path = os.path.normpath(os.path.join(DIR, "data/fear.png"))
     result = fer(image_path)
     self.assertIsInstance(result, dict)
     self.assertGreater(result['Fear'], 0.25)
Esempio n. 42
0
 def test_happy_fer(self):
     """A happy face loaded as a grayscale array scores Happy above 0.5."""
     test_face = self.load_image("data/happy.png", as_grey=True)
     response = fer(test_face)
     self.assertTrue(isinstance(response, dict))
     self.assertTrue(response['Happy'] > 0.5)
Esempio n. 43
0
 def test_batch_fer(self):
     """Batch FER on a one-path list returns a one-dict list."""
     test_data = [os.path.normpath(os.path.join(DIR, "data/48by48.png"))]
     response = fer(test_data, api_key=self.api_key)
     self.assertTrue(isinstance(response, list))
     self.assertTrue(isinstance(response[0], dict))
Esempio n. 44
0
collection.add_data([["text1", "label1"], ["text2", "label2"], ...])

# Training
collection.train()

# Telling Collection to block until ready
collection.wait()

# Done! Start analyzing text
collection.predict("indico is so easy to use!")


#images
import indicoio

indicoio.fer('https://IMAGE_URL')
Specifying Filepath

import indicoio

indicoio.fer('FILEPATH')
Formatting images using skimage

import skimage.io
import indicoio

pixel_array = skimage.io.imread('FILEPATH')

indicoio.fer(pixel_array)
Formatting images using PIL and numpy
Esempio n. 45
0
 def test_fer_detect(self):
     """With detect=True, fer() returns one entry per face, with a location."""
     test_data = os.path.normpath(os.path.join(DIR, "data/fear.png"))
     response = fer(test_data, api_key=self.api_key, detect=True)
     self.assertIsInstance(response, list)
     self.assertEqual(len(response), 1)
     self.assertIn("location", response[0])
pictureCount = 1
forwardCount = 0
text = 0
# Live webcam loop: run indico FER on every frame and overlay the six emotion
# scores as colored text, until the camera fails or ESC is pressed.
while True:
    ret, frame = cam.read()

    if not ret:
        # Camera read failed; stop the loop.
        break
    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break

    # NOTE(review): one blocking remote FER call per frame — very slow;
    # consider throttling or batching.
    emotions = indicoio.fer(frame)
    cv2.putText(frame, "Angry:" + str(emotions["Angry"]), (1, 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, "Sad:" + str(emotions["Sad"]), (1, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 127, 127), 2)
    cv2.putText(frame, "Neutral:" + str(emotions["Neutral"]), (1, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.putText(frame, "Surprise:" + str(emotions["Surprise"]), (1, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (127, 127, 255), 2)
    cv2.putText(frame, "Fear:" + str(emotions["Fear"]), (1, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
    cv2.putText(frame, "Happy:" + str(emotions["Happy"]), (1, 110),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (127, 0, 127), 2)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Esempio n. 47
0
 def test_url_support(self):
     """fer() accepts an image URL and returns all six emotion scores."""
     url = "https://s3-us-west-2.amazonaws.com/indico-test-data/face.jpg"
     result = fer(url, api_key=self.api_key)
     self.assertIsInstance(result, dict)
     self.assertEqual(6, len(result.keys()))
Esempio n. 48
0
 def test_url_support(self):
     """fer() accepts an HTTPS image URL and returns six emotion scores."""
     test_url = "https://s3-us-west-2.amazonaws.com/indico-test-data/face.jpg"
     response = fer(test_url, api_key=self.api_key)
     self.assertTrue(isinstance(response, dict))
     self.assertEqual(len(response.keys()), 6)