def test_batch_image_recognition(self):
    """A batch request should return one top-3 probability dict per image."""
    image_path = os.path.normpath(os.path.join(DIR, "data", "fear.png"))
    results = image_recognition(
        [image_path, image_path], api_key=self.api_key, top_n=3)
    self.assertIsInstance(results, list)
    first = results[0]
    self.assertIsInstance(first, dict)
    self.assertEqual(len(first), 3)
    self.assertIsInstance(next(iter(first.values())), float)
Example no. 2
0
 def analyze_image(self, gif_url):
     """
     Run image recognition on the gif and, if any objects are recognized
     with probability > 0.1, run sentiment analysis on the object names
     and return the (average) sentiment score.

     Returns None when the recognition API rejects the input (ValueError),
     and falls back to self.text_sentiment when nothing is recognized.
     """
     # NOTE: the original wrapped this call in `while True`, but every
     # path either broke or returned on the first iteration, so a plain
     # try/except is equivalent.
     try:
         gif_output = indicoio.image_recognition(gif_url)
     except ValueError:
         # Unrecognizable/invalid image data -- signal failure to caller.
         return None
     # Keep only objects the classifier is reasonably confident about.
     self.list_objects = [
         name for name, prob in gif_output.items() if prob > .1
     ]
     if len(self.list_objects) == 0:
         # Nothing recognizable: fall back to the text-based sentiment.
         return self.text_sentiment
     if len(self.list_objects) == 1:
         self.sentiment_output = indicoio.sentiment_hq(self.list_objects[0])
     else:
         # Batch call returns one score per object; average them.
         self.sentiment_output = np.mean(
             indicoio.sentiment_hq(self.list_objects))
     return self.sentiment_output
Example no. 3
0
def processImage():
    """Fetch the image at ?url=..., run indico image recognition on it,
    and return JSON with the base64-encoded image and the top-2 keywords.
    """
    url = request.args.get("url")
    r = requests.get(url, stream=True)
    # b64encode returns bytes on Python 3; decode to str so the value is
    # JSON-serializable by jsonify instead of raising a TypeError.
    encoded_string = base64.b64encode(r.content).decode("ascii")
    indicoResult = indicoio.image_recognition(encoded_string, top_n=2, hq=True)
    app.logger.debug(indicoResult)

    return json.jsonify(uri=encoded_string, keywords=indicoResult)
Example no. 4
0
 def test_batch_image_recognition(self):
     """Batch recognition of two copies of fear.png yields top-3 dicts."""
     img = os.path.normpath(os.path.join(DIR, "data", "fear.png"))
     batch = image_recognition([img, img], api_key=self.api_key, top_n=3)
     self.assertIsInstance(batch, list)
     head = batch[0]
     self.assertIsInstance(head, dict)
     self.assertEqual(len(head), 3)
     self.assertIsInstance(next(iter(head.values())), float)
Example no. 5
0
# Category keyword lists used to bucket each image's top recognition label.
# NOTE(review): `jackets` is referenced below (and jackets.txt is written)
# but never defined in this excerpt -- presumably a `jackets = [...]` list
# exists above; confirm, otherwise the loop raises NameError.
dresses = ["dress", "gown"]
tops = ["button", "blouse", "shirt", "top"]
bottoms = ["pants", "skirt", "trousers", "denim", "jeans", "shorts", "slacks"]

# Collect jpg paths from the imgs/ tree (intended cap: 500 images).
clothing = []
i=0
for root, dirs, files in os.walk("imgs"):
    for image in files:
        if image.endswith("jpg"):
            clothing.append(os.path.join(root, image))
            i+=1

            # NOTE(review): this break only exits the inner (files) loop,
            # so the 500 cap can be exceeded across multiple directories.
            if i == 500:
                break
            
# One recognition dict (label -> probability) per collected image.
clothes = indicoio.image_recognition(clothing)
# NOTE(review): files opened without `with` and never closed/flushed;
# `b` (bottoms.txt) is opened but no branch below ever writes to it.
j = open('jackets.txt', 'w')
d = open('dresses.txt', 'w')
t = open('tops.txt', 'w')
b = open('bottoms.txt', 'w')

# NOTE(review): `i` is reset here but never incremented inside the loop,
# so `clothing[i]` always refers to clothing[0] -- every record gets the
# first image's path. Likely intended: `for i, cloth in enumerate(clothes)`.
i=0
for cloth in clothes:
    # Sort labels by descending probability; sortC[0][0] is the top label.
    sortC = sorted(cloth.items(), key=operator.itemgetter(1), reverse=True)
    print(sortC[0][0])
    # Append a "path!label?" record to the matching category file.
    if any(s in sortC[0][0] for s in jackets):
        j.write(clothing[i]+"!"+sortC[0][0]+"?")
    elif any(s in sortC[0][0] for s in dresses):
        d.write(clothing[i]+"!"+sortC[0][0]+"?")
    elif any(s in sortC[0][0] for s in tops):
        t.write(clothing[i]+"!"+sortC[0][0]+"?")
Example no. 6
0
 def test_expected_response(self):
     """Recognition of the keyboard image should include 'space bar'."""
     keyboard = os.path.normpath(os.path.join(DIR, "data", "keyboard.jpg"))
     result = image_recognition(keyboard, api_key=self.api_key, top_n=3)
     assert "space bar" in result
 def test_expected_response(self):
     """The keyboard photo's top-3 labels must contain 'space bar'."""
     img_path = os.path.normpath(os.path.join(DIR, "data", "keyboard.jpg"))
     labels = image_recognition(img_path, api_key=self.api_key, top_n=3)
     assert "space bar" in labels.keys()
Example no. 8
0
def findThings(thePath):
    """Return the labels of the top 3 objects recognized in the photo."""
    recognized = indicoio.image_recognition(thePath, top_n=3)
    return recognized.keys()
import cv2
import time

import indicoio
# SECURITY(review): API key hard-coded in source -- move it to an
# environment variable or config file and rotate the leaked key.
indicoio.config.api_key = 'd17e09c08d43f673f05743ec7304c9be'

# Continuously capture webcam frames, show them, and print the most
# likely object recognized in each frame. Press 'q' to quit.
cap = cv2.VideoCapture(0)

try:
    while True:
        # Pause so we don't send EVERY frame to the API
        # (that would be overly expensive/time-consuming).
        time.sleep(.2)
        ret, frame = cap.read()

        # Display the resulting frame.
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Defaults reported when the API returns no candidates.
        highestProb = [0]
        highestItem = ['thing']
        # Dict of ~1000 candidate labels -> probability for this frame.
        probDict = indicoio.image_recognition(frame)
        if probDict:
            # Most likely object on screen (replaces the manual
            # compare-every-entry loop with max over the dict keys).
            best = max(probDict, key=probDict.get)
            highestProb[0] = probDict[best]
            highestItem[0] = best
        # Python 3 print calls (the original used Python 2 statements,
        # which are a SyntaxError on Python 3).
        print(highestProb)
        print(highestItem)
finally:
    # Release the capture and close windows even if the loop raises.
    cap.release()
    cv2.destroyAllWindows()