def image_recognition(request):
    query = request.GET.get("q")
    args = {}
    if query:
        instance = vr(api_key='119030bac8e38e1721e61e0f6a295e18e5d9ecdf', version='2016-05-20')
        img = instance.classify(images_url=query)
        args = {'images': img['images'][0]['classifiers'][0]['classes']}
    return render(request, 'watson/image_recognition.html', args)
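# A minimal sketch of how the view above might be wired up. The route and the
# template markup below are assumptions for illustration, not taken from the
# original project:
#
# urls.py:
#     from django.urls import path
#     from . import views
#     urlpatterns = [
#         path('image-recognition/', views.image_recognition, name='image_recognition'),
#     ]
#
# watson/image_recognition.html (renders the classes passed in args):
#     {% for c in images %}
#         <li>{{ c.class }}: {{ c.score }}</li>
#     {% endfor %}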
def url_recognizer():
    wvr = vr(
        version="2018-03-19",
        iam_apikey="F_-o_C5ie1HKGU750akv3SsbDwy5dI2yigLVl0TSZVOi",
    )
    img = wvr.classify(
        url="https://s2.glbimg.com/3auOxS3cG2mc_H5jFXDpxC7ol-w=/e.glbimg.com/og/ed/f/original/2016/09/12/dr-alan-turing-2956483.jpg"
    )
    print(img)
    return img
# Visual Recognition with IBM Watson - Image Classification
# Requirement 1: IBM Bluemix account (free) - https://console.ng.bluemix.net/
# Requirement 2: the watson_developer_cloud Python module
# Once registered, log in to your account, go to Services -> Watson and create a Visual Recognition instance
# Once you have your VR instance created, you will use its API credentials in your Python code
from watson_developer_cloud import VisualRecognitionV3 as vr

# creating a VR instance
instance = vr(api_key='paste your api_key here', version='2016-05-20')

# select an image (local or URL). Copy its location (path or URL):
img = instance.classify(images_url='url-path-to-img.jpg')

# you can run this code in the interpreter. If you request >>> img it will output a JSON-formatted result.
# Walking down the JSON tree with the following input will display what Watson sees in the image, and the confidence level:
# >>> img['images'][0]['classifiers'][0]['classes']

# for a better view of the results, you can use pprint
import pprint
pprint.pprint(img['images'][0]['classifiers'][0]['classes'])

# I posted a demo of this here: http://bit.ly/2gZg4D9
# If you need help with Watson and Visual Recognition, send me a message.
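# For reference, the response that classify() returns - and that the snippets in
# this file index into - has roughly this shape (an abridged sketch; the class
# names and scores here are illustrative, not real output):
#
# {
#   "images": [{
#     "classifiers": [{
#       "classifier_id": "default",
#       "name": "default",
#       "classes": [
#         {"class": "person", "score": 0.95, "type_hierarchy": "/people"},
#         {"class": "portrait photo", "score": 0.72}
#       ]
#     }],
#     "resolved_url": "url-path-to-img.jpg"
#   }]
# }
#
# which is why img['images'][0]['classifiers'][0]['classes'] yields the list of
# class/score pairs used throughout.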
# Visual Recognition with IBM Watson - Image Classification
# Requirement 1: IBM Bluemix account (free) - https://console.ng.bluemix.net/
# Requirement 2: the watson_developer_cloud Python module
# Once registered, log in to your account, go to Services -> Watson and create a Visual Recognition instance
# Once you have your VR instance created, you will use its API credentials in your Python code
from watson_developer_cloud import VisualRecognitionV3 as vr

# creating a VR instance
instance = vr(api_key='143a027a9be556e0c04d17cc87df0e84a6f838b4', version='2016-05-20')

# select an image (local or URL). Copy its location (path or URL):
img = instance.classify(
    images_url='http://ichef.bbci.co.uk/wwfeatures/wm/live/624_351/images/live/p0/3d/tk/p03dtkw2.jpg'
)

# you can run this code in the interpreter. If you request >>> img it will output a JSON-formatted result.
# Walking down the JSON tree with the following input will display what Watson sees in the image, and the confidence level:
# >>> img['images'][0]['classifiers'][0]['classes']

# for a better view of the results, you can use pprint
import pprint
pprint.pprint(img['images'][0]['classifiers'][0]['classes'])
def __init__(self, name=None, device=None, context=None):
    App.__init__(self, name, device, context)
    self.engine = vr('2018-07-10', iam_apikey=self.device.get_encrypted_field('key'))
    self.jString = ''
# -*- coding: utf-8 -*-
import os
from os.path import join

from watson_developer_cloud import VisualRecognitionV3 as vr
import numpy as np

vr_instance = vr(version='2016-05-20', api_key='ca62a5844926baf007e5558a1d4c236dbccee838')


def find_images(walk_dir):
    '''
    Find images of different labels (corresponding to different subfolders)
    according to parent folder 'walk_dir'.
    '''
    names = []
    fnames = []
    str_labels = []
    for root, subdirs, files in os.walk(walk_dir):
        for filename in files:
            if filename.endswith(('.jpg', '.png', '.JPEG')):
                full_fname = join(root, filename)
                names.append(filename)
                fnames.append(full_fname)
                str_labels.append(root)
    [u, labels] = np.unique(str_labels, return_inverse=True)
    return np.array(fnames), np.array(labels)
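# A hedged usage sketch for find_images(): classify the first few discovered
# images with the vr_instance created above. The 'dataset' directory name is an
# assumption; classify() accepts an open binary file via images_file in this
# SDK version.
if __name__ == '__main__':
    fnames, labels = find_images('dataset')
    for fname in fnames[:3]:  # smoke-test a handful of images
        with open(fname, 'rb') as image_file:
            result = vr_instance.classify(images_file=image_file)
        top = result['images'][0]['classifiers'][0]['classes'][0]
        print(fname, top['class'], top['score'])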
#!/usr/bin/env python3
import json
import os
import sys

from watson_developer_cloud import VisualRecognitionV3 as vr

try:
    devKey = os.environ['IBM_DEV_KEY']
except KeyError:
    sys.exit("IBM_DEV_KEY environment variable needs to be set")

instance = vr(api_key=devKey, version='2017-01-24')

elvisTestDir = '/Users/janae/data/elvisPMs_last100'
# testPMs = ['Tony_Blair', 'Gordon_Brown', 'David_Cameron']
testPMs = ['David_Cameron']

# per PM: PM found, PM not found, no face found, wrong PM found
pmRes = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
nameUnknown = 'unknown'

for i in range(len(testPMs)):
    testperson = testPMs[i]
    print(os.path.join(elvisTestDir, testperson))
    allImg = [
        os.path.join(elvisTestDir, testperson, f)
        for f in os.listdir(os.path.join(elvisTestDir, testperson))
        if os.path.isfile(os.path.join(elvisTestDir, testperson, f))
        and f.endswith(".jpg")
    ]
    for imgFile in allImg:
print 'metadata: ', folder_metadata

# download first image file
print " "
filelocation = raw_input("Enter file location: (example: /Home/13-15-00.jpg) ")
f, metadata = client.get_file_and_metadata(filelocation)
print metadata
im = Image.open(f)
im.show()
print client.share(filelocation, short_url=False)
fileurl = client.share(filelocation, short_url=False)
print fileurl.get('url')
lasturl = fileurl.get('url')

# examine first image file
instance = vr(api_key='put your watson ibm api key here', version='2016-05-20')
img = instance.classify(images_url=lasturl)
a = 0
for things in img['images'][0]['classifiers'][0]['classes']:
    if (things['score'] * 100) > a:
        a = things['score'] * 100
        first = things['class']
    print('\n There is a ' + str(things['score'] * 100) + ' percent chance the image contains: ' + things['class'])
print first

# second image file retrieved
filelocation2 = raw_input("Enter file location to compare to: (example: /Home/13-15-00.jpg) ")
c, metadata = client.get_file_and_metadata(filelocation2)
from watson_developer_cloud import VisualRecognitionV3 as vr
import webbrowser
import time

# creating a VR instance
instance = vr(api_key='ff644e192cbf2fcd509165d85a7b053f360ca6a1', version='2016-05-20')

# select an image (local or URL). Copy its location (path or URL):
img = instance.classify(images_url='http://i.dailymail.co.uk/i/pix/2011/12/23/article-2077964-0CAA9E3A000005DC-263_468x319.jpg')

# you can run this code in the interpreter. If you request >>> img it will output a JSON-formatted result.
# Walking down the JSON tree with the following input will display what Watson sees in the image, and the confidence level:
# >>> img['images'][0]['classifiers'][0]['classes']

# for a better view of the results, you can use pprint
print "The output given by Watson \n"
print(img)
print("--------------------------------------------------------------------------------------------------------------------------------------------------------- \n")

num_of_results = len(img['images'][0]['classifiers'][0]['classes'])

print "The output in a readable user-friendly way \n"
print("This image contains the following:")
for x in range(0, num_of_results):
    print img['images'][0]['classifiers'][0]['classes'][x]['class'], 'with confidence level:', img['images'][0]['classifiers'][0]['classes'][x]['score']
print("--------------------------------------------------------------------------------------------------------------------------------------------------------- \n")
def __init__(self, name, device, context):
    App.__init__(self, name, device, context)
    self.engine = vr('2016-05-20', api_key=self.device.get_encrypted_field('key'))
from watson_developer_cloud import VisualRecognitionV3 as vr
import pprint

api_key = input("Enter your api key : ")
instance = vr(api_key=api_key, version='2016-05-20')


def visualRecog(self):
    images_url = input("Enter the url of the image :\n")
    img = self.classify(images_url=images_url)
    data = img['images'][0]['classifiers'][0]['classes']
    pprint.pprint(data)


def textRecog(self):
    images_url = input("Enter the url of the image :\n")
    img = self.recognize_text(images_url=images_url)
    print(img['images'][0]['text'])


def facialRecog(self):
    images_url = input("Enter the url of the image :\n")
    img = self.detect_faces(images_url=images_url)
    for identity in img['images'][0]['faces']:
        if ('identity' in identity):
            print(
                "Name : " + identity['identity']['name'] + "\n",
                "Gender : " + identity['gender']['gender'] + "\n",
''' THE MAIN PYTHON CODE TO DRIVE THE RASPBERRY PI - IBM BLUEMIX DUO '''
# importing necessary modules
import json
import sys
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3 as vr
from pprint import pprint

# instantiating
visual_recognition = vr(vr.latest_version, api_key='0234986d7c042310503a1e2477f1f8579a190040')

# classify a local image against the custom classifier (raw string so the
# backslashes in the Windows path are not treated as escape sequences)
with open(
        r'C:\Users\Pritesh J Shah\Desktop\VisualRecognition\leaf_negative_examples\leaf69.jpg',
        'rb') as image_file:
    a = json.dumps(visual_recognition.classify(
        images_file=image_file,
        threshold=0,
        classifier_ids=['PestControl_834594187']), indent=2)

# retrieving the JSON object
js = json.loads(a)

# parsing the scores of the two classes
a = js['images'][0]['classifiers'][0]['classes'][0]['score']
b = js['images'][0]['classifiers'][0]['classes'][1]['score']

# checking for pest/not pest: report the higher-scoring class
if a > b:
    print(js['images'][0]['classifiers'][0]['classes'][0]['class'])
else:
    print(js['images'][0]['classifiers'][0]['classes'][1]['class'])
# at this point we get whether the image contains pests or not
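# The two-way score comparison above generalizes to any number of classes with
# max(); an equivalent sketch over the same parsed 'js' structure:
top = max(js['images'][0]['classifiers'][0]['classes'], key=lambda c: c['score'])
print(top['class'])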
def __init__(self, name=None, device=None):
    App.__init__(self, name, device)
    self.engine = vr('2016-05-20', api_key=self.get_device().get_password())
def initiliazeWatson():
    ibmWatson = vr(iam_apikey='qqBbMGQ4qmRaPBLbGENUrJMtt-Xy3PvxQk_sptgYDCzJ', version='2016-05-20')
    return ibmWatson
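# Hypothetical usage of the factory above; the image URL is a placeholder, and
# the url= keyword assumes the newer SDK signature used by url_recognizer()
# earlier in this file:
watson = initiliazeWatson()
result = watson.classify(url='https://example.com/photo.jpg')
print(result['images'][0]['classifiers'][0]['classes'])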