# Requires json, os, urllib2, zipfile, and a get_soup() helper (a
# BeautifulSoup wrapper) defined elsewhere in the source file.
import json
import os
import urllib2
import zipfile
from zipfile import ZipFile


def main(args):
    visual_recognition = VisualRecognition(
        version='2016-05-20',
        api_key='7b851fccf7f17a35fc7569a5dad6e1eb4f650f70')
    with open('ingredients.txt') as f:
        lines = f.read().splitlines()
    for line in lines:
        directory = "C:/Dev/GitHub/flavortown/imagetesting/zips" + line
        query = line
        max_images = 25
        save_directory = directory
        image_type = "Action"
        query = query.split()
        query = '+'.join(query)
        query = query + "+walmart+OR+amazon"
        url = "https://www.google.com/search?q=" + query + "&source=lnms&tbm=isch"
        header = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
        }
        soup = get_soup(url, header)

        # Contains the link for large original images and the type of image.
        ActualImages = []
        for a in soup.find_all("div", {"class": "rg_meta"}):
            link, Type = json.loads(a.text)["ou"], json.loads(a.text)["ity"]
            ActualImages.append((link, Type))

        fileNames = []
        for i, (img, Type) in enumerate(ActualImages[0:max_images]):
            try:
                # header is already the headers dict; the original wrapped it
                # in another {'User-Agent': ...} dict, which would send a dict
                # as the User-Agent value.
                req = urllib2.Request(img, headers=header)
                raw_img = urllib2.urlopen(req).read()
                if len(Type) == 0:
                    fileName = "img" + "_" + str(i) + ".jpg"
                else:
                    fileName = "img" + "_" + str(i) + "." + Type
                f = open(fileName, 'wb')
                f.write(raw_img)
                f.close()
                fileNames.append(fileName)
            except Exception as e:
                print("could not load : " + img)

        # Bundle the downloads into one zip per ingredient, then clean up.
        myzip = ZipFile(line + '.zip', 'w', zipfile.ZIP_DEFLATED)
        for fileName in fileNames:
            myzip.write(fileName)
        myzip.printdir()
        myzip.close()
        for fileName in fileNames:
            os.remove(fileName)
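# --- Hedged sketch (not in the original source): the per-ingredient zips
# --- built above could feed custom-classifier training, mirroring the
# --- create_classifier calls used elsewhere in this collection. The function
# --- name train_from_zip and the one-class-per-zip layout are assumptions.
def train_from_zip(visual_recognition, zip_name):
    # <class>_positive_examples is the keyword pattern the old
    # watson_developer_cloud SDK expected for training zips.
    with open(zip_name + '.zip', 'rb') as positives:
        result = visual_recognition.create_classifier(
            zip_name, **{zip_name + '_positive_examples': positives})
        print(json.dumps(result, indent=2))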
def main():
    # Load the Visual Recognition service from Watson with the right credentials.
    creds = json.load(open('credentials' + os.sep + 'watson_credentials.json', 'r'))['visual']
    url = creds['url']
    api_key = creds['iam_apikey']
    visual_recognition = VisualRecognition(version='2016-05-20', url=url, iam_apikey=api_key)
    window = GUI(visual_recognition)
    window.mainloop()
def __init__(self):
    self.context = {}
    self.option_dict = {}
    self.intents = []
    self.entities = []
    self.recipe_options = []
    self.database_cursor = None

    # Reset environment variables
    self.context['search_recipe'] = False
    self.context['image_recipe'] = False
    self.context['suggest_dish'] = False
    self.context['yum_sugest'] = False
    self.context['summary'] = False
    self.context['option'] = None
    self.context['cuisine_type'] = None
    self.context['ingredients'] = None
    self.context['intolerances'] = None
    self.context['dish'] = None
    self.context['counter'] = 0
    self.context['insult_counter'] = 0

    # Services initialization
    # Slack client instance
    self.slack_client = SlackClient(SLACK_BOT_TOKEN)
    # Watson Conversation service instance
    self.conversation = Conversation(version=CONVERSATION_VERSION,
                                     username=CONVERSATION_USERNAME,
                                     password=CONVERSATION_PASSWORD,
                                     url=CONVERSATION_URL)
    # Watson Visual Recognition service instance
    self.visual_recognition = VisualRecognition(version=VISUAL_RECOGNITION_VERSION,
                                                url=VISUAL_RECOGNITION_URL,
                                                api_key=VISUAL_RECOGNITION_KEY)
    # Database connection
    self.database_connection(DB_STRING_CONNECTION)
def search(request):
    if request.method == 'POST':
        f = searchform(request.POST, request.FILES)
        if not f.is_valid():
            return render(request, 'objects/home.html', {'f': f})
        userobj = f.save(commit=False)
        userobj.save()
        visual_recognition = VisualRecognition(
            '2016-09-30',
            api_key='7a85ea3ccce43d03daea662cb7b6b7236aeb4dd0')
        img = visual_recognition.classify(
            images_url="http://127.0.0.1:8000/images/9/")
        print(json.dumps(img, indent=2))
        # classify() returns a JSON-style dict, so the result must be indexed
        # rather than accessed as attributes (the original used
        # img.images.classifiers.classes, which raises AttributeError).
        p = img['images'][0]['classifiers'][0]['classes']
        for i in p:
            d = i['class']
            tags.objects.create(tags_value=d, parent=userobj)
        return HttpResponse('OK')
    if request.method == 'GET':
        f = searchform()
        return render(request, 'objects/home.html', {'f': f})
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition
import picamera
from time import sleep
import os
import zipfile

# Update the classifier id here
classifier_ids = ["face_429689582"]

# Update the api key
visual_recognition = VisualRecognition(
    '2016-05-20', api_key='dec041de3393d4b66120d22e69999999999c0')

directory = "image"
url = None

# Initialize the camera
camera = picamera.PiCamera()

if not os.path.exists(directory):
    os.makedirs(directory)

# Capture ten frames into image/. (The original opened image.zip for reading
# before it had been written, which fails on a fresh run; the read handle
# belongs after the archive is built and closed.)
zipf = zipfile.ZipFile('image.zip', 'w', zipfile.ZIP_DEFLATED)
for i in range(10):
    filePath = 'image/' + str(i) + '.jpg'
    camera.capture(filePath)
    sleep(0.1)
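# --- Hedged completion sketch (not in the original source): a companion
# --- snippet in this collection adds each capture to the archive with
# --- zipf.write(...), and the classify call mirrors the parameters-as-JSON
# --- style used elsewhere here. Treat this as an assumed continuation.
for i in range(10):
    filePath = 'image/' + str(i) + '.jpg'
    zipf.write(filePath, os.path.basename(filePath))
zipf.close()

with open('image.zip', 'rb') as images_zip:
    response = visual_recognition.classify(
        images_file=images_zip,
        parameters=json.dumps({'classifier_ids': classifier_ids}))
    print(json.dumps(response, indent=2))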
import json
from os.path import join, dirname
from watson_developer_cloud import VisualRecognitionV1Beta as VisualRecognition

visual_recognition = VisualRecognition(username='******', password='******')

print(json.dumps(visual_recognition.labels(), indent=2))

with open(join(dirname(__file__), '../resources/test.jpg'), 'rb') as image_file:
    print(json.dumps(
        visual_recognition.recognize(
            image_file, labels_to_check={'label_groups': ['Indoors']}),
        indent=2))
def __init__(self):
    self.vr = VisualRecognition(
        version='2016-05-20',
        api_key='7b851fccf7f17a35fc7569a5dad6e1eb4f650f70')
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition

# 31e589a7ae4990a6261e68d8b2931acf5145b973 - alvaro - combined
# 37ff1e95c9da3f5e5161d6f49b0139469c087f8d . watsonR - separate
visual_recognition = VisualRecognition(
    '2016-05-20', api_key='31e589a7ae4990a6261e68d8b2931acf5145b973')

with open(join(dirname(__file__), 'positive_todas.zip'), 'rb') as carne_uptc, \
        open(join(dirname(__file__), 'negative_arregladas.zip'), 'rb') as carne_otro:
    print(json.dumps(visual_recognition.create_classifier(
        'carne_uptc',
        completo_positive_examples=carne_uptc,
        negative_examples=carne_otro), indent=2))

# with open(join(dirname(__file__), 'positive_arregladas.zip'), 'rb') as carne_completo, \
#         open(join(dirname(__file__), 'positive_arregladas.zip'), 'rb') as carne_cortado, \
#         open(join(dirname(__file__), 'negative_arregladas.zip'), 'rb') as carne_otro:
#     print(json.dumps(visual_recognition.create_classifier(
#         'CarnesUPTCvsCarnesOtros',
#         completo_positive_examples=carne_completo,
#         cortado_positive_examples=carne_cortado,
#         negative_examples=carne_otro), indent=2))
import json
import os
import zipfile
from zipfile import ZipFile

import numpy as np
import scipy.misc
from skimage import color, io, transform
from sklearn.cluster import DBSCAN
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition


def full(fileName):
    vr = VisualRecognition(version='2016-05-20',
                           api_key='7b851fccf7f17a35fc7569a5dad6e1eb4f650f70')

    # Load the image, resize it to 1000px wide (preserving aspect ratio),
    # and threshold all three Lab channels. (reduce is needed here: with
    # three positional arrays, np.logical_and treats the third as `out`.)
    rgb = scipy.misc.imread(fileName, mode='RGB')
    aspect_ratio = len(rgb) / len(rgb[1])
    rgb = transform.resize(rgb, [int(1000 * aspect_ratio), 1000])
    img = color.rgb2lab(rgb)
    thresholded = np.logical_and.reduce(
        [img[..., i] > t for i, t in enumerate([40, 0, 0])])
    if np.sum(thresholded) > (thresholded.size / 2):
        thresholded = np.invert(thresholded)

    # Cluster every fifth foreground pixel to locate the individual items.
    X = np.argwhere(thresholded)[::5]
    X = np.fliplr(X)
    db = DBSCAN(eps=25, min_samples=200).fit(X)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_

    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    print('Estimated number of clusters: %d' % n_clusters_)

    unique_labels = set(labels)
    cropped_images = []
    unique_labels.remove(-1)
    for k in unique_labels:
        left = min(X[labels == k][:, 0])
        right = max(X[labels == k][:, 0])
        padding = 20
        if left > padding:
            left = left - padding
        if right < len(img[1]) - padding:
            right = right + padding
        cropped_images.append(rgb[0:len(img), left:right])

    # Save each cropped image by its index number, then zip them up.
    myzip = ZipFile('test.zip', 'w', zipfile.ZIP_DEFLATED)
    for c, cropped_image in enumerate(cropped_images):
        io.imsave(str(c) + ".png", cropped_image)
        myzip.write(str(c) + ".png")
    myzip.printdir()
    myzip.close()
    for c, cropped_image in enumerate(cropped_images):
        os.remove(str(c) + ".png")

    classes = []
    with open('test.zip', 'rb') as img:
        param = {'classifier_ids': "foodtest_1606116153"}
        params = json.dumps(param)
        response = vr.classify(images_file=img, parameters=params)
        for image in response['images']:
            max_score = 0
            max_class = ""
            for classifier in image['classifiers']:
                for classif in classifier['classes']:
                    if classif['score'] > max_score:
                        max_class = classif['class']
                        # Track the running best score; the original never
                        # updated max_score, so the last class always won.
                        max_score = classif['score']
            if max_class:
                max_class = max_class.replace('_', ' ')
                if max_class not in classes:
                    classes.append(max_class)
    os.remove('test.zip')
    return classes
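# --- Hedged usage sketch (not in the original source): full() takes a path
# --- to a photo of several food items and returns the class names Watson
# --- assigns to each cropped item. 'plate.jpg' is a hypothetical file name.
if __name__ == '__main__':
    detected = full('plate.jpg')
    print("Detected ingredients:", detected)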
# -*- coding: utf-8 -*-
import json
import sys

import requests
from channels import Group
from bs4 import BeautifulSoup
from watson_developer_cloud import LanguageTranslatorV2 as LanguageTranslator
from watson_developer_cloud import ToneAnalyzerV3 as ToneAnalyzer
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition

language_translator = LanguageTranslator(
    username='******', password='******')
visual_recognition = VisualRecognition(
    '2016-05-20', api_key='4beec9a677ee8140f62645d7dcb8d52bffbcd0c3')
tone_analyzer = ToneAnalyzer(version='2016-05-19',
                             username='******', password='******')


def send_channel_message(room, label, channel_layer, message, context):
    data = {'handle': 'Watson', 'message': message}
    m = room.messages.create(**data)
    text = json.dumps(m.as_dict())
    room.context = json.dumps(context)
    room.save()
    Group('chat-' + label, channel_layer=channel_layer).send({'text': text})


def send_message_to_watson(room, conversation, channel_layer, command, label,
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition

# 31e589a7ae4990a6261e68d8b2931acf5145b973 - alvaro - combined
# 37ff1e95c9da3f5e5161d6f49b0139469c087f8d . watsonR - separate
visual_recognition = VisualRecognition(
    '2016-05-20', api_key='37ff1e95c9da3f5e5161d6f49b0139469c087f8d')

data = json.loads(
    json.dumps(visual_recognition.classify(
        owners="me",
        images_url="https://scontent-mia1-2.xx.fbcdn.net/v/t34.0-12/15327550_10211547072255521_2026008890_n.jpg?oh=de2807a23063390a95628815543e73e9&oe=5847146E"
    ), indent=2))

# print(data)
if len(data['images'][0]['classifiers']) > 0:
    for i in data['images'][0]['classifiers'][0]['classes']:
        print(i['class'] + ' = ' + str(i['score']))
else:
    print('it did not score against any of the values of the two classes')
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition

visual_recognition = VisualRecognition(
    '2016-05-20', api_key='f279b7058ce2af35a49946cfef5d352452c1f34c')  # My api
# visual_recognition = VisualRecognition(
#     '2016-05-20', api_key='d6106599a7afdca48a99c48e90a8d1c3dc033f32')  # Yeshwant Api


def fetchClassification():
    # The second argument is an absolute Windows path, so the join() with
    # dirname(__file__) is a no-op; the absolute path wins.
    with open(join(dirname(__file__),
                   'C:\\Users\\Kumar\\Desktop\\some wallpapers\\random.jpg'),
              'rb') as filename:
        return json.dumps(visual_recognition.classify(images_file=filename,
                                                      owners='me'))
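# --- Hedged usage sketch (not in the original source): fetchClassification()
# --- returns the raw JSON string, so printing it shows Watson's response.
if __name__ == '__main__':
    print(fetchClassification())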
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition
import picamera
import subprocess
from time import sleep
import os
import zipfile

visual_recognition = VisualRecognition('2016-05-20', api_key='***')

with open(join(dirname(__file__), 'surya.zip'), 'rb') as surya, \
        open(join(dirname(__file__), 'vijay.zip'), 'rb') as vijay:
    print(json.dumps(visual_recognition.create_classifier(
        'face',
        surya_positive_examples=surya,
        vijay_positive_examples=vijay), indent=2))
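# --- Hedged follow-up sketch (not in the original source): create_classifier's
# --- response carries a classifier_id that, once training finishes, can score
# --- a fresh capture. The id 'face_1234567890' and file name 'test.jpg' are
# --- hypothetical; the parameters-as-JSON classify style mirrors other
# --- snippets in this collection.
camera = picamera.PiCamera()
camera.capture('test.jpg')
sleep(0.1)
with open('test.jpg', 'rb') as test_image:
    print(json.dumps(visual_recognition.classify(
        images_file=test_image,
        parameters=json.dumps({'classifier_ids': ['face_1234567890']})),
        indent=2))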
def __init__(self):
    self.visual_recognition = VisualRecognition('2016-05-20',
                                                api_key=config.api_key)
    self.downloader = ImageDownloader(config.yoox_rest_endpoint)
    password=os.environ.get("STT_PASSWORD"))  # tail of a constructor call; its opening is not part of this snippet

workspace_id = os.environ.get("WORKSPACE_ID")

conversation = ConversationV1(
    username=os.environ.get("CONVERSATION_USERNAME"),
    password=os.environ.get("CONVERSATION_PASSWORD"),
    version='2016-02-11')

tts = TextToSpeechV1(
    username=os.environ.get("TTS_USERNAME"),
    password=os.environ.get("TTS_PASSWORD"),
    x_watson_learning_opt_out=True)  # Optional flag

vr = VisualRecognition(
    api_key=os.environ.get("VISUALRECOGNITION_API"),
    version='2016-05-20')

classifier_id = os.environ.get("CLASSIFIER_ID")

# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,
                          LED_INVERT, LED_BRIGHTNESS)
# Initialize the library (must be called once before other functions).
strip.begin()

# Initialize the camera
camera = picamera.PiCamera()
# camera needs to be rotated as it is upside-down
camera.rotation = 180
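# --- Hedged sketch (not in the original source): the objects set up above
# --- suggest a capture-and-classify step against the custom classifier_id.
# --- The file name 'frame.jpg' is hypothetical, json is assumed imported
# --- earlier in the file, and the parameters-as-JSON classify call mirrors
# --- other snippets in this collection.
camera.capture('frame.jpg')
with open('frame.jpg', 'rb') as frame:
    result = vr.classify(
        images_file=frame,
        parameters=json.dumps({'classifier_ids': [classifier_id]}))
print(json.dumps(result, indent=2))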
import json from os.path import join, dirname from os import environ from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition """VARIABLES""" exampleURL = "https://tinyurl.com/y7oblnh8" carPictures = "cars.zip" truckPictures = "trucks.zip" classID = "" """ OBJECTS """ #Watson Visual Recognition object. Simply call to classify and detect images. visual_recognition = VisualRecognition('2016-05-20', api_key='b5a4ecc127e8c2ddb36b814b57315c09b9192d20') class CarsvsTrucks: """ FUNCTIONS (NOTE: View README.md for more information on what these functions do)""" #Example of pre-classifying images in URL form. #Change the exampleURL variable up above and call this function in order to receive #a report from watson on what it thinks your picture is. def classifyURLImage(): print(json.dumps(visual_recognition.classify(images_url=exampleURL), indent=2)) #Example of creating a custom classifier. #Change the carPictures and the truckPictures variables up above to whatever you have nammed your zip files #that contain the images that will be used to train your A.I..
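    # --- Hedged sketch (not in the original source): the snippet cuts off
    # --- before this function's body. A plausible continuation, modeled on
    # --- the cars-vs-trucks create_classifier call elsewhere in this
    # --- collection; the method name createClassifier is assumed.
    def createClassifier():
        with open(carPictures, 'rb') as cars, open(truckPictures, 'rb') as trucks:
            print(json.dumps(visual_recognition.create_classifier(
                'CarsVsTrucks',
                cars_positive_examples=cars,
                negative_examples=trucks), indent=2))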
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition
import picamera
from time import sleep
import os
import zipfile

# Update the classifier id here
classifier_ids = ["face_429689582"]

# Update the api key (the V3 constructor also needs its version date as the
# first argument, as in the companion snippet above; the original omitted it)
visual_recognition = VisualRecognition('2016-05-20', api_key='***')

directory = "image"
url = None

# Initialize the camera
camera = picamera.PiCamera()

if not os.path.exists(directory):
    os.makedirs(directory)

# Capture ten frames and add each one to image.zip. (As in the companion
# snippet above, the original opened image.zip for reading before writing it;
# reopen it only after the archive is complete.)
zipf = zipfile.ZipFile('image.zip', 'w', zipfile.ZIP_DEFLATED)
for i in range(10):
    filePath = 'image/' + str(i) + '.jpg'
    camera.capture(filePath)
    sleep(0.1)
    zipf.write(filePath, os.path.basename(filePath))
from watson_developer_cloud import VisualRecognitionV3 as VisualRecognition
import json
from os.path import join, dirname
from os import environ
from dataset import *
import numpy as npy

visual_recognition = VisualRecognition(
    '2016-05-20', api_key='4f74058bfd275549a67d7f3724bcdb5ace4da123')

datasets = loadDatasetRaw()

'''
train_data = datasets[0]
train_labels = datasets[1]
painting = train_data[0][0]
p2 = train_data[0][1]
score = train_labels[0]
print(score)
print("\n##########################################")
print("Corrupted painting: " + str(painting.imageFilename()))
print("Theme: " + painting.theme)
print("Title: " + painting.title)
print("Artist: " + painting.artist)
print("Style: " + painting.style)
print("Genre: " + painting.genre)
print("Wiki URL: " + painting.wikiURL)
print("Image URL: " + painting.imageURL)
print("##########################################\n")
'''

img_urls = {}
for i in range(len(datasets)):
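    # --- Hedged continuation sketch (not in the original source): the snippet
    # --- ends at the loop header above. The commented-out block shows that
    # --- paintings carry imageFilename() and imageURL, so a plausible body
    # --- maps filenames to URLs; the dataset layout assumed here (each entry
    # --- a (painting, p2) pair) is a guess.
    for painting, p2 in datasets[i]:
        img_urls[painting.imageFilename()] = painting.imageURL

# A collected URL could then be sent to Watson, mirroring the
# classify(images_url=...) calls elsewhere in this collection.
for url in list(img_urls.values())[:1]:
    print(json.dumps(visual_recognition.classify(images_url=url), indent=2))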
import json
from os.path import join, dirname
from watson_developer_cloud import VisualRecognitionV2Beta as VisualRecognition

visual_recognition = VisualRecognition(version='2015-12-02',
                                       username='******', password='******')

# with open(join(dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \
#         open(join(dirname(__file__), '../resources/trucks.zip'), 'rb') as trucks:
#     print(json.dumps(visual_recognition.create_classifier(
#         'Cars vs Trucks', positive_examples=cars,
#         negative_examples=trucks), indent=2))

# with open(join(dirname(__file__), '../resources/car.jpg'), 'rb') as image_file:
#     print(json.dumps(visual_recognition.classify(image_file), indent=2))

# print(json.dumps(visual_recognition.get_classifier(classifier_id='Tiger'), indent=2))

# The service currently has a bug where even successful deletions return a 404
# print(json.dumps(visual_recognition.delete_classifier(classifier_id='YOUR CLASSIFIER ID'), indent=2))

print(json.dumps(visual_recognition.list_classifiers(), indent=2))

with open(join(dirname(__file__), '../resources/test.jpg'), 'rb') as image_file:
    print(json.dumps(visual_recognition.classify(
        image_file, classifier_ids=['Tiger', 'Cat']), indent=2))