def IsPizza(image1):
    """Classify *image1* (URL or local file path) with Clarifai's food model
    and print whether it is a pizza, and whether it carries pineapple.

    Prints the verdict; returns None.
    """
    # NOTE(review): API key is hard-coded -- should move to config/env var.
    app = ClarifaiApp(api_key='f53aa080f78d418fb75051b03f80d3c4')
    model = app.models.get('food-items-v1.0')

    if validators.url(image1):
        image = ClImage(image1)
    else:
        # BUG FIX: the original passed the file object positionally, which
        # the Clarifai client interprets as a URL; it must go in file_obj=.
        # `with` also closes the handle (ClImage reads it at construction).
        with open(image1, 'rb') as fh:
            image = ClImage(file_obj=fh)

    response = model.predict([image])
    elements = response['outputs'][0]['data']['concepts']

    isPizzaThreshold = 0.85
    isPineappleThreshold = 0.3
    isPizza = False
    isPineapple = False
    for element in elements:
        if element['name'] == 'pizza' and element['value'] > isPizzaThreshold:
            isPizza = True
        if element['name'] == 'pineapple' and element['value'] > isPineappleThreshold:
            isPineapple = True

    if isPineapple and isPizza:
        print('Get the hell out of here')
    elif isPizza:
        print(
            'Your pizza passes the standards of the anti pineapple pizza community.'
        )
def uploadTrain():
    """Predict the top apparel concept for each training image listed in
    Mongo and bulk-upload the images to Clarifai tagged with that concept.

    Skips (and reports) any image that fails to open or predict.
    """
    apparel_Model = APP.models.get('apparel')
    imgAry = []
    import mongo
    data = mongo.getTrain()
    print(len(data))
    for item in data[401:]:
        try:
            with open('./img/' + str(item['Unique_No']) + '.jpg', "rb") as obj:
                conceptObj = apparel_Model.predict([ClImage(file_obj=obj)])
                # Concepts come back sorted by confidence; take the first.
                mainConcept = ''
                for key in conceptObj['outputs'][0]['data']['concepts']:
                    mainConcept = key['name']
                    break
                # BUG FIX: predict() consumed the file object above; rewind
                # before re-reading it for the upload, otherwise the uploaded
                # image body is empty.
                obj.seek(0)
                imgAry.append(
                    ClImage(file_obj=obj,
                            image_id=str(item['Unique_No']) + '.jpg',
                            concepts=[mainConcept],
                            metadata={'key': mainConcept}))
        except Exception as e:
            print(e)
    APP.inputs.bulk_create_images(imgAry)
def whichOneIsCuter(image_url1, image_url2):
    """Upload both images to Clarifai and print the URLs of the inputs it
    predicts to match the concept 'cute'."""
    candidates = [
        ClImage(url=image_url1, allow_dup_url=True),
        ClImage(url=image_url2, allow_dup_url=True),
    ]
    app.inputs.bulk_create_images(candidates)
    for match in app.inputs.search_by_predicted_concepts(concept='cute'):
        print(match.url)
def bulkInsert(start, end, path, filesAry):
    """Predict the top apparel concept for filesAry[start:end] found under
    *path* and bulk-upload them to Clarifai tagged with that concept."""
    apparel_Model = APP.models.get('apparel')
    imgAry = []
    print(filesAry)
    try:
        for filename in filesAry[start:end]:
            with open(os.path.join(path, filename), "rb") as obj:
                conceptObj = apparel_Model.predict([ClImage(file_obj=obj)])
                # Concepts come back sorted by confidence; take the first.
                mainConcept = ''
                for key in conceptObj['outputs'][0]['data']['concepts']:
                    mainConcept = key['name']
                    break
                # BUG FIX: predict() consumed the file object above; rewind
                # before re-reading it for the upload, otherwise the uploaded
                # image body is empty.
                obj.seek(0)
                imgAry.append(
                    ClImage(file_obj=obj,
                            image_id=filename,
                            concepts=[mainConcept],
                            metadata={'key': mainConcept}))
    except Exception as e:
        # Was a silent bare `except: pass`; at least report the failure.
        print(e)
    APP.inputs.bulk_create_images(imgAry)
def main():
    """Crop the face out of ./img/face1.jpeg using Clarifai's face model,
    then report the dominant non-white/non-black color of the crop."""
    app = ClarifaiApp()

    # Get image height and width.
    img = Image.open('./img/face1.jpeg')
    width, height = img.size

    # Detect boundaries of the face.
    face_model = app.models.get('face-v1.3')
    image = ClImage(file_obj=open('./img/face1.jpeg', 'rb'))
    face_result = face_model.predict([image])
    face = face_result['outputs'][0]['data']

    # Get the (relative, 0..1) bounding box; the loop ends up keeping the
    # first region of the last entry in `face`, as in the original.
    for i in face.values():
        for j in i:
            top = i[0]['region_info']['bounding_box']['top_row']
            left = i[0]['region_info']['bounding_box']['left_col']
            right = i[0]['region_info']['bounding_box']['right_col']
            bottom = i[0]['region_info']['bounding_box']['bottom_row']

    # Convert relative coordinates to pixels.
    top = int(top * height)
    left = int(left * width)
    right = int(right * width)
    bottom = int(bottom * height)

    # Crop the face and save it, scaled back to the original size.
    img2 = img.crop((left, top, right, bottom))
    img2 = img2.resize((width, height)).save('./img/new.jpeg')

    # Detect colors in the cropped image.
    image2 = ClImage(file_obj=open('./img/new.jpeg', 'rb'))
    color_model = app.models.get('color')
    color_result = color_model.predict([image2])
    color = color_result['outputs'][0]['data']

    a = 0
    mval = 0
    skin = dec = None  # BUG FIX: were unbound if every color is white/black
    for i in color.values():
        for j in i:
            hex_dec = i[a]['w3c']['hex']
            name = i[a]['w3c']['name']
            value = i[a]['value']
            a = a + 1
            # BUG FIX: the original used `or`, which is always true; we want
            # to skip both pure white AND pure black.
            if hex_dec != '#ffffff' and hex_dec != '#000000':
                if value > mval:
                    mval = value
                    skin = name
                    dec = hex_dec
    print(mval, skin, dec)
def add_images(self, images, class_type):
    """Bulk-upload *images* (file paths) to Clarifai in batches of 128,
    tagging each batch as a positive or negative example of
    self.concept_name depending on *class_type*."""
    tag = [self.concept_name]
    for batch in chunks(images, 128):
        if class_type == 'positive':
            payload = [ClImage(filename=p, concepts=tag) for p in batch]
        else:
            payload = [ClImage(filename=p, not_concepts=tag) for p in batch]
        self.app.inputs.bulk_create_images(payload)
def __init__(self, source):
    """Wrap *source* as a Clarifai image: fetch it by URL when it looks
    like one, otherwise treat it as a local file path."""
    url_pattern = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if url_pattern.match(source):
        self.image = ClImage(url=source)
    else:
        self.image = ClImage(filename=source)
def check_friend(self, frame):
    """Write *frame* to tmp.png, embed it with the face model, and compare
    the embedding against every known friend in self.friends.

    Sets self.show_message (1 = friend recognized, 2 = not) and, on a
    match, self.friend_name.
    """
    img_name = 'tmp.png'
    cv2.imwrite(img_name, frame)
    print("{} written!".format(img_name))

    # The prediction depends only on the frame, so run it once instead of
    # once per friend as the original did.
    test_img = ClImage(filename=img_name)
    test_img_pred = self.model.predict([test_img])
    try:
        pic2 = test_img_pred['outputs'][0]['data']['regions'][0][
            'data']['embeddings'][0]['vector']
    except KeyError:
        # BUG FIX: the original retried this lookup in a `while True` loop,
        # but the response never changes, so a missing key meant an infinite
        # loop. Treat "no face/embedding in response" as no match.
        print("Oops! That was no valid number. Try again...")
        self.show_message = 2
        return

    differences = [neural_network(pic2, friend[1]) for friend in self.friends]

    if len(differences) > 0:
        best = min(differences)
        if best < self.limit:
            self.friend_name = self.friends[differences.index(best)][0]
            self.show_message = 1
        else:
            self.show_message = 2
        print('Difference is: {}'.format(best))
    else:
        self.show_message = 2
def run(myApi):
    """Reset the Clarifai app keyed by *myApi* and upload every image URL
    listed (one per line) in vacations_url.txt."""
    import os
    from clarifai.rest import ClarifaiApp
    from clarifai.rest import Image as ClImage

    app = ClarifaiApp(api_key=myApi)

    # Start from an empty app so re-runs do not create duplicates.
    app.inputs.delete_all()

    # Read the image URLs from the text file.
    url_path = os.path.join(os.path.curdir, 'vacations_url.txt')
    with open(url_path) as data_file:
        images = [url.strip() for url in data_file]

    # Turn the URLs into uploadable Clarifai images and push them.
    uploadable = [ClImage(url=u, allow_dup_url=False) for u in images]
    app.inputs.bulk_create_images(uploadable)

    # NAMES file path (computed but unused, as in the original).
    NAMES_FILE_NAME = 'vacations_names.txt'
    NAMES_FILE_PATH = os.path.join(os.path.curdir, NAMES_FILE_NAME)
    return
def determine(image):
    """Run the 'Ocean Cleanup' model on the local image file *image* and
    return the first recognized trash category ('bottle', 'can', 'balloon',
    'cigarettes' or 'plastic bag'), or "not found"."""
    model = app.models.get("Ocean Cleanup")
    # Close the handle once the image bytes are read.
    with open(image, 'rb') as fh:
        climage = ClImage(file_obj=fh)

    # The original called predict() twice (once via str() plus fragile
    # string parsing into an unused list, once for real). One structured
    # call is enough.
    jsonTags = model.predict([climage])
    names = [tag['name'] for tag in jsonTags['outputs'][0]['data']['concepts']]

    targets = ("bottle", "can", "balloon", "cigarettes", "plastic bag")
    for name in names:
        if name in targets:
            return name
    return "not found"
def clarifai_prediction(image):
    """Return the id of the top mood concept Clarifai predicts for *image*
    (a local file path)."""
    app = ClarifaiApp(api_key='f789eaa60ab04869a2b80c752daa3484')
    model = app.models.get('Mood Recognition')
    prediction = model.predict([ClImage(filename=image)])
    return prediction['outputs'][0]['data']['concepts'][0]['id']
def add_category(post):
    """Tag *post* with Clarifai's general-model concepts, saving one
    CategoryModel row per concept; print a diagnostic when any layer of
    the response is missing or the status code is not OK (10000)."""
    app = ClarifaiApp(api_key=API_KEY)
    model = app.models.get("general-v1.3")
    image = ClImage(file_obj=open(post.image.path, 'rb'))
    response = model.predict([image])

    # Guard clauses replace the original nested if/else pyramid; the
    # messages and lookup order are unchanged.
    if response["status"]["code"] != 10000:
        print("Response Code Error")
        return
    if not response["outputs"]:
        print("No Outputs List Error")
        return
    if not response["outputs"][0]["data"]:
        print("No Data List Error")
        return
    concepts = response["outputs"][0]["data"]["concepts"]
    if not concepts:
        print("No Concepts List Error")
        return
    for concept in concepts:
        CategoryModel(post=post, category_text=concept["name"]).save()
def ethnicity_guesser(image_url):
    """Return the top 'multicultural appearance' concept name the Clarifai
    demographics model predicts for the first face at *image_url*."""
    model = app.models.get("demographics")
    prediction = model.predict([ClImage(url=image_url)])
    face = prediction['outputs'][0]['data']['regions'][0]['data']['face']
    return face['multicultural_appearance']['concepts'][0]['name']
def identify():
    """!identify <image>
    Queries clarif.ai to identify the top 10 concepts within an image.
    Usage: !identify <image>"""
    msg = Message(flask.request.form)
    fileName = msg.fileName
    if fileName == 'noFile':
        return 'Please supply a file.'
    elif fileName == 'fileError':
        return 'There was an error selecting the last file transfer.'
    else:
        init_clarifai()
        model = clarifai.models.get('general-v1.3')
        # Close the file handle once the image is read (the original
        # leaked it).
        with open(fileName, 'rb') as fh:
            image = ClImage(file_obj=fh)
        imageData = model.predict([image])
        try:
            imageData = imageData['outputs'][0]['data']['concepts'][:10]
            imageData = [x['name'] for x in imageData]
            return reduce_output(imageData, delimiter=', ')
        except (KeyError, IndexError, TypeError):
            # Narrowed from a bare `except:` so real bugs (and Ctrl-C)
            # are not swallowed.
            return 'Error'
def check_image(browser,
                clarifai_api_key,
                img_tags,
                img_tags_skip_if_contain,
                logger,
                full_match=False,
                logging=False,
                picture_url=None):
    """Uses the link to the image to check for invalid content in the image

    img_tags: iterable of (tags, should_comment, comments) triples.
    Returns (True, comments) when a should-comment tag set matches,
    (False, []) when a blocking tag set matches and no skip-tag is present,
    and (True, []) otherwise.
    NOTE(review): the `logging` parameter shadows the stdlib module name.
    """
    clarifai_api = ClarifaiApp(api_key=clarifai_api_key)
    # set req image to given one or get it from current page
    if picture_url is None:
        img_link = get_imagelink(browser, logger, logging)
    else:
        img_link = picture_url
    # Uses Clarifai's v2 API
    model = clarifai_api.models.get('general-v1.3')
    image = ClImage(url=img_link)
    result = model.predict([image])
    # Lower-cased concept names predicted for the image.
    clarifai_tags = [concept.get('name').lower() for concept in result[
        'outputs'][0]['data']['concepts']]
    for (tags, should_comment, comments) in img_tags:
        if should_comment:
            # A positive match means: yes, comment, with these comments.
            if given_tags_in_result(tags, clarifai_tags, full_match):
                return True, comments
        else:
            # A blocking match suppresses commenting, unless one of the
            # skip-tags is also present in the prediction.
            if given_tags_in_result(tags, clarifai_tags, full_match):
                if not given_tags_in_result(img_tags_skip_if_contain,
                                            clarifai_tags, full_match):
                    logger.info('Not Commenting, image reco contains: "{}".'.format(', '.join(list(set(clarifai_tags)&set(tags)))))
                    return False, []
    # No tag set decided anything: allow commenting with no extra comments.
    return True, []
def label_images(input_dir, filetype, season):
    """ Label images to be fed to Clarifai Model

    Builds a ClImage tagged with *season* as its concept and the other
    three seasons as not-concepts. Still work in progress (see TODO).
    """
    # TODO: Figure out how to add concepts.
    # Assign them IDs and specify concepts and non concepts.
    # figure out the bulk_create_images response error.
    seasons = ["spring", "summer", "fall", "winter"]
    concept = season
    seasons.remove(str(season))
    nonconcepts = seasons
    print(concept)
    print(nonconcepts)
    app = ClarifaiApp(api_key='f7e11a3064f8468087ca656dce9e7abc')
    images = [os.path.join(input_dir, i) for i in os.listdir(input_dir)
              if os.path.isfile(os.path.join(input_dir, i))
              and i.endswith(filetype)]
    for image in app.inputs.get_all():
        print(image.input_id)
    img_list = []
    # BUG FIX: not_concepts expects a flat list of concept names; the
    # original passed [nonconcepts], a list nested inside a list.
    image = ClImage(url='https://samples.clarifai.com/metro-north.jpg',
                    concepts=[concept],
                    not_concepts=nonconcepts)
def output_prediction(word):
    """Predict the food item shown at URL *word* and return a tuple of
    (food name, first matching recipe)."""
    app = ClarifaiApp(api_key='b93c9746c9cc4a79baec13d0a83739cb')
    model = app.models.get('food-items-v1.0')
    data = model.predict([ClImage(url=word)])
    top_food = data['outputs'][0]['data']['concepts'][0]['name']
    recipe = get_recipe([top_food], [])[0]
    return (top_food, recipe)
def isThisFood(image_url):
    """Return the list of food concept names Clarifai predicts for the
    image at *image_url*."""
    food_model = app.models.get('food-items-v1.0')
    prediction = food_model.predict([ClImage(url=image_url)])
    return [c['name'] for c in prediction['outputs'][0]['data']['concepts']]
def my_callback(channel):
    """PIR-motion callback: flash the status LED, take a photo, ask
    Clarifai's general model whether a bird is in it, and play an alarm
    sound if so."""
    GPIO.output(pir_led, True)   # Turn on LED
    time.sleep(0.5)              # leave LED on for 0.5 seconds, just for test

    # Take the picture with the camera LED lit.
    camera = picamera.PiCamera()
    camera.vflip = True
    GPIO.output(camera_led, GPIO.HIGH)
    camera.capture('slika.jpg')
    GPIO.output(camera_led, GPIO.LOW)
    camera.close()
    GPIO.output(pir_led, False)  # turn off LED

    danger = False
    app = ClarifaiApp(api_key='------------key-----------')
    model = app.models.get('general-v1.3')
    image = ClImage(file_obj=open('slika.jpg', 'rb'))
    response = model.predict([image])

    for concept in response['outputs'][0]['data']['concepts']:
        print(concept['name'], concept['value'])
        # can be more filters: ["danger","man","people","adult","weapon","safety"]
        if any(word in concept['name'] for word in ["bird"]):
            danger = True

    if danger:
        print('sviraj')
        # Play the alarm track until it finishes.
        song = "countdown.mp3"
        pygame.init()
        pygame.mixer.init()
        pygame.mixer.music.load(song)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            pygame.time.Clock().tick(10)
def array_to_byte(inputs):
    """Convert a float array (values in roughly [-0.5, 0.5]) to a
    PNG-encoded in-memory image wrapped as a Clarifai ClImage."""
    fig = np.around((inputs + 0.5) * 255)
    fig = fig.astype(np.uint8).squeeze()
    img = Image.fromarray(fig)
    imgByteArr = io.BytesIO()
    img.save(imgByteArr, format="PNG")
    # BUG FIX: img.save() leaves the stream positioned at the end; rewind
    # so the Clarifai client reads the PNG bytes instead of nothing.
    imgByteArr.seek(0)
    return ClImage(file_obj=imgByteArr)
def lewd():
    """!lewd <image>
    Queries clarif.ai to detect if an image is 'lewd'.
    Usage: !lewd <image>"""
    msg = Message(flask.request.form)
    fileName = msg.fileName
    if fileName == 'noFile':
        return 'Please supply a file.'
    elif fileName == 'fileError':
        return 'There was an error selecting the last file transfer.'
    else:
        init_clarifai()
        model = clarifai.models.get('nsfw-v1.0')
        if not model:
            return 'Model no longer exists.'
        # Close the file handle once the image is read (the original
        # leaked it).
        with open(fileName, 'rb') as fh:
            image = ClImage(file_obj=fh)
        responses = []
        imageData = model.predict([image])
        # Top concept decides: 'nsfw' vs 'sfw'.
        result = imageData['outputs'][0]['data']['concepts'][0]
        if result['name'] == u'nsfw':
            responses.append('LEEEWWWDDD!!!!!')
            responses.append('accuracy: %f' % result['value'])
            return responses
        else:
            return 'not lewd'
def extract_demography(app, url):
    """Predict age / gender / multicultural appearance for the first face
    in the picture at *url* (a URL string or an open file object).

    Returns {"age": ..., "gender": ..., "multiculture": ...}, or {} when
    the response has no 'regions' key, or None when the region list is
    empty (matching the original's fall-through).
    """
    if app is None:
        app = ClarifaiApp(api_key='9a2117f4967a4f2d92e84adb221c5cc1')
    model = app.models.get("demographics")

    if isinstance(url, str):
        prediction = model.predict_by_url(url=url)
    else:
        prediction = model.predict([ClImage(file_obj=url)])

    def top_name(concepts):
        # Name of the highest-confidence concept.
        return max(concepts, key=lambda j: j['value'])['name']

    try:
        regions = prediction['outputs'][0]['data']['regions']
        for region in regions[:1]:  # analyse only the first face found
            face = region['data']['face']
            return {
                "age": top_name(face['age_appearance']['concepts']),
                "gender": top_name(face['gender_appearance']['concepts']),
                "multiculture": top_name(face['multicultural_appearance']['concepts']),
            }
    except KeyError as e:
        return {}
def IsPizza(url):
    """Classify the image at *url* with the module's model and return a
    verdict string about pizza (and the presence of pineapple)."""
    prediction = model.predict([ClImage(url=url)])
    elements = prediction['outputs'][0]['data']['concepts']

    pizza_threshold = 0.85
    pineapple_threshold = 0.3
    has_pizza = any(e['name'] == 'pizza' and e['value'] > pizza_threshold
                    for e in elements)
    has_pineapple = any(e['name'] == 'pineapple' and e['value'] > pineapple_threshold
                        for e in elements)

    if has_pineapple and has_pizza:
        return 'FATAL ERROR! pineapple detected.'
    if has_pizza:
        return 'Your pizza passes the standards of the anti pineapple pizza community.'
    return 'That is no pizza, man.'
def call_vision_api(image_filename, api_keys):
    """Run Clarifai's general model on the local file *image_filename*.

    api_keys: mapping holding the Clarifai key under 'clarifai'.
    Returns the raw prediction response dict.
    """
    clarifai_key = api_keys['clarifai']
    app = ClarifaiApp(api_key=clarifai_key)
    model = app.models.get('general-v1.3')
    # Close the file handle once the image is read (the original leaked it).
    with open(image_filename, 'rb') as fh:
        image = ClImage(file_obj=fh)
    result = model.predict([image])
    return result
def predict_color(app, url, num_top=1):
    """Return the *num_top* dominant colors of the picture at *url* (a URL
    string or an open file object) as [{"embedding": w3c_hex, "prob":
    value}], sorted by descending probability."""
    if app is None:
        app = ClarifaiApp(api_key='9a2117f4967a4f2d92e84adb221c5cc1')
    model = app.models.get("color")

    # predict with the model
    if isinstance(url, str):
        prediction = model.predict_by_url(url=url)
    else:
        prediction = model.predict([ClImage(file_obj=url)])

    raw = prediction['outputs'][0]['data']['colors']
    # Keep only the W3C hex code and its coverage fraction, e.g.
    # {'w3c': {'hex': '#d3d3d3', ...}, 'value': 0.59625} -> one entry.
    colors = [{"embedding": c['w3c']['hex'], "prob": c['value']} for c in raw]
    colors.sort(key=lambda x: -x['prob'])
    colors = colors[:min(num_top, len(colors))]
    logger.info("predict_color. colors are: {}".format(colors))
    return colors
def clarifai(runningCount, user_name):
    """Score the currently displayed Instagram photo with Clarifai's NSFW
    model; when the SFW confidence is >= 0.80, like the photo and record
    *user_name* with a timestamp in the database.

    NOTE(review): `runningCount += 1` below only changes the local copy --
    the caller's counter is never updated and nothing is returned; verify
    whether the caller expects the incremented value back.
    """
    #need img_link
    clarifai_api = ClarifaiApp(api_key='a88172a02f3e4036b5b13bdee391e7f3')
    # Grab the photo URL from the page currently loaded in the browser.
    img_link = browser.find_element_by_xpath('//img[@class = "_2di5p"]') \
        .get_attribute('src')
    #print(img_link)
    #general model
    #model = clarifai_api.models.get('general-v1.3')
    #nsfw model
    model = clarifai_api.models.get('e9576d86d2004ed1a38ba0cf39ecb4b1')
    image = ClImage(url=img_link)
    #only predicts image, not videos
    result = model.predict([image])
    #print(result)
    # First concept's confidence; presumably the 'sfw' concept -- TODO
    # confirm the model's concept ordering.
    sfwValue = float(result['outputs'][0]['data']['concepts'][0]['value'])
    print(sfwValue)
    if sfwValue >= 0.80:
        print("passed clarifai")
        #finding hearts when on picture - ready to use.
        #hearts = browser.find_elements_by_xpath("//a[@role='button']/span[@class='_8scx2']")
        hearts = browser.find_elements_by_class_name('coreSpriteHeartOpen')
        #/html/body/div[4]/div/div[2]/div/article/div[2]/section[1]/a[1]/span
        print(len(hearts))
        sleep(5)
        # Click the first "like" heart on the page.
        ActionChains(browser).move_to_element(hearts[0]).click().perform()
        print("liked photo")
        sleep(5)
        runningCount += 1
        print("Total likes this round: ", runningCount)
        #INSERT USERNAME AND TIME TO DATABASE using sqlite3
        #timeNow = time.ctime(int(time.time()))
        #insert_user(user_name, timeNow)
        #query all to make list
        #allUsers = query_users()
        #print(allUsers)
        #delete all usernames in db and check to make sure they cleared
        #delete_table()
        #anyLeft = query_users()
        #print(anyLeft)
        #INSERT USERNAME AND TIME TO DATABASE using postgreSQL
        timeNow = time.ctime(int(time.time()))
        insert_user(user_name, timeNow)
    else:
        print("failed clarifai")
def upload():
    """Flask endpoint: on POST, store the uploaded file in the user's
    folder, run the Clarifai model on it, and record the prediction in
    Mongo; on GET, render the upload page."""
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            now = datetime.now()
            username = request.cookies.get('userID')
            print(username)
            # Timestamped name inside the per-user upload folder.
            # (The original first assigned `filename` a value that was
            # immediately overwritten; the dead assignment is dropped.)
            filename = os.path.join(
                app.config['UPLOAD_FOLDER'] + username,
                "%s.%s" % (now.strftime("%Y-%m-%d-%H-%M-%S-%f"),
                           file.filename.rsplit('.', 1)[1]))
            print(app.config['UPLOAD_FOLDER'] + username)
            file.save(filename)
            # Close the file handle once the image is read (the original
            # leaked it).
            with open(filename, 'rb') as fh:
                image = ClImage(file_obj=fh)
            pred = model.predict([image])
            user_images.insert_one({
                "username": username,
                "filepath": filename,
                "clarifai_data": pred
            })
            print(pred)
            return jsonify({"success": True})
    elif request.method == 'GET':
        return render_template("upload.html")
def handler():
    """Detect food items in the fridge photo, look up recipes for them on
    Spoonacular, and return {recipe title: YouTube video id}."""
    app = ClarifaiApp(api_key=env_vars.clarifai_api_key)
    model = app.models.get('food-items-v1.0')
    img = ClImage(file_obj=open('pics/fridge2.jpeg', 'rb'))
    pred = model.predict([img])

    # Comma-separated ingredient list built from the predicted concepts.
    items = ", ".join(json.dumps(con['name'])[1:-1]
                      for con in pred['outputs'][0]['data']['concepts'])

    response = unirest.get(
        "https://spoonacular-recipe-food-nutrition-v1.p.mashape.com/recipes/findByIngredients",
        params={"ingredients": items},
        headers={
            "X-Mashape-Key": "IwLJqO6bAtmshh9PXpr3EhpVvpJWp1smLbBjsnwCNvi8KTvV4u",
            "X-Mashape-Host": "spoonacular-recipe-food-nutrition-v1.p.mashape.com"
        })

    title_video_id = {}
    for res in response.body:
        title = json.dumps(res['title'])[1:-1]
        video_id = youtube.youtube_search(title)
        if video_id:
            title_video_id[title] = str(video_id)
    return title_video_id
def extractImageFeatures(imgs_files):
    """Predict Clarifai general-model concepts for *imgs_files* in batches
    of 90, then project each image's concepts onto every vocabulary file in
    settings.VOCAB_ROOT and save one feature vector file per (image,
    vocabulary) pair.

    Returns the list of written feature-file paths.
    NOTE(review): this is Python 2 code -- `len(imgs)`, `len(vocabs)` and
    indexing into `map(...)` results require map to return a list; under
    Python 3 this raises TypeError. Verify the target interpreter.
    """
    classes = ['BH','BR','O','P','SH']
    fs = FileSystemStorage(location=settings.FEAT_ROOT)
    app = ClarifaiApp('jiBNMPUJ4QR7GT-VRDL6ZdrvfgbYRzK6jX7DQKXp','jlwJ1AKBNSN1B8205X_3oqaZLYrxOa2kL3DUKX77')
    app.auth.get_token()
    batch = 90  # Clarifai predict batch size
    sent_count = 0
    model = app.models.get('general-v1.3')
    concepts = []
    while sent_count < len(imgs_files):
        # Wrap the next batch of files as Clarifai images keyed by path.
        imgs = map(lambda x: ClImage(file_obj=open(x, 'rb'), image_id=x), imgs_files[sent_count:min(len(imgs_files), sent_count + batch)])
        resp = model.predict(imgs)
        # NOTE(review): since len(imgs) <= sent_count + batch always holds
        # here, this is effectively `sent_count += len(imgs)` -- presumably
        # the intended advance; confirm.
        sent_count += min(len(imgs), sent_count + batch)
        filenames = map(lambda x: x['input']['id'], resp['outputs'])
        concepts += getConcepts(resp)
    file_paths = []
    vocabs = os.listdir(settings.VOCAB_ROOT)
    vocabs = map(lambda x: os.path.join(settings.VOCAB_ROOT,x),vocabs)
    for j in range(len(vocabs)):
        vocab = vocabs[j]
        with open(vocab, 'r') as f:
            vocab = f.readlines()
        for i in range(len(concepts)):
            # Feature vector: concept score per vocabulary term (x[:-1]
            # strips the trailing newline), 0.0 when absent.
            fv = np.array([map(lambda x: concepts[i].get(x[:-1],0.0), vocab)])
            f_path = fs.path(imgs_files[i].split('/')[-1])+"_%s.feat"%classes[j]
            file_paths.append(f_path)
            np.savetxt(f_path, fv, fmt='%s')
    return file_paths
def clarify(urlstr):
    """Classify *urlstr* (an http(s) URL or a local file path) with the
    module's Clarifai model and return the name of the highest-confidence
    concept. Also writes the formatted response to output.txt."""
    # BUG FIX: the original tested `'http:' in urlstr`, which misses https
    # URLs (they were wrongly treated as local file names).
    if urlstr.startswith(('http://', 'https://')):
        x = ClImage(url=urlstr)
    else:
        x = app.inputs.create_image_from_filename(urlstr)
    predictions = str(model.predict([x]))

    # Write the response string to a file, breaking after each '}' --
    # kept as-is because other tooling may read output.txt. `with` also
    # closes the handles the original leaked.
    with open('output.txt', 'w') as fil:
        for ch in predictions:
            if ch == '}':
                fil.write('}\n')
            else:
                fil.write(ch)

    # Take the first (highest-confidence) concept line.
    result = None
    with open('output.txt', 'r') as fil:
        for line in fil:
            if '\'id\': \'ai' in line:
                result = line
                print(line)
                break

    # Extract just the concept name from the line.
    result1 = result[(result.find('\'name\':') + 7):]
    result2 = result1[:result1.find(',')]
    return result2