# imports this fragment needs (cfg is assumed to be a configparser object
# loaded earlier in the original file)
import logging
import sys

import pymongo
import tornado.web
import tornado.websocket
from watson_developer_cloud import VisualRecognitionV3

# configure connection to mongodb
conn = pymongo.MongoClient(cfg['DEFAULT']['_URI'])
try:
    conn.server_info()
except Exception as e:
    logging.error("Unable to connect to {s}".format(s=cfg['DEFAULT']['_URI']))
    conn = None
    sys.exit(1)
handle = conn[cfg['DEFAULT']['_DBNAME']][cfg['DEFAULT']['_COLNAME']]
print("Connected to Atlas!")

# configure connection to watson VisualRecognitionV3 api
visual_recognition = VisualRecognitionV3(
    cfg['DEFAULT']['_WATSONAPIVER'],
    iam_apikey=cfg['DEFAULT']['_WATSONAPIKEY'])

#########
# configure web interface
#########
_clients = []  # registry of connected websocket clients

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("Web/index.html", title="Welcome")

class WebSockHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print("New client connected")
        _clients.append(self)
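# A minimal sketch of how these handlers might be wired into a running Tornado
# application; the route paths and port below are illustrative assumptions,
# not taken from the original source.
import tornado.ioloop

def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/ws", WebSockHandler),
    ])

if __name__ == "__main__":
    app = make_app()
    app.listen(8888)  # assumed port
    tornado.ioloop.IOLoop.current().start()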
!pip install --upgrade "watson-developer-cloud>=2.4.1"
!pip install simplejson

import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='1ILZkyzgNMH6FTgPJ8WtT6WR4scQj_LNmkHRFsMN78Pj')

# File path will change when run on colab.
with open('/images/image1', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        threshold='0.6',
        classifier_ids='default').get_result()
print(json.dumps(classes, indent=2))
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='tDYM20kS5sD2jkAApAOrb83RdBO0I2nl-rCuVp10gsJH')

with open('./image.jpg', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        threshold='0.6',
        classifier_ids='DefaultCustomModel_877446449').get_result()

a = classes['images'][0]['classifiers'][0]['classes'][0]['class']
print(a)
if a == 'persons':
    print("authorized person entered")
else:
    print("unauthorized person detected")
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='4a5dce0273f76cfc7fdebaec7d43f6828a512194')

def classify(url):
    return visual_recognition.classify(images_url=url)
from watson_developer_cloud import VisualRecognitionV3

# __init__ fragment from a larger class (the enclosing class is not shown in
# the original source)
def __init__(self):
    self.url = "https://gateway-a.watsonplatform.net/visual-recognition/api"
    self.note = "It may take up to 5 minutes for this key to become active"
    self.api_key = "bbe846d049b62bb116e525f0ad3c6b2989d99613"
    # keep the client on the instance so it survives __init__ (the original
    # bound it to a local variable that was immediately discarded)
    self.visual_recognition = VisualRecognitionV3('2016-05-20', api_key=self.api_key)
import time
from datetime import datetime
import os

# using Google Text-to-Speech
from gtts import gTTS
from pygame import mixer
import cv2
import json
from watson_developer_cloud import VisualRecognitionV3
import RPi.GPIO as GPIO

# Detecting the object using the Visual Recognition service (IBM Cloud)
visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='RLYVVSSFLFSjrr9pueeCaL4pTjEqs-7cLuCaG4njjqym')

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# connect the ultrasonic sensor to the following pins of the Raspberry Pi
TRIG = 23
ECHO = 24

print("Distance Measurement In Progress")
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
print("Waiting for Sensor Data")
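# A minimal sketch (an assumption, not from the original source) of the usual
# HC-SR04 read loop that would follow this setup: pulse TRIG for 10 us, time
# the ECHO pulse, and convert the duration to centimetres.
def measure_distance_cm():
    GPIO.output(TRIG, False)
    time.sleep(0.1)              # let the sensor settle
    GPIO.output(TRIG, True)
    time.sleep(0.00001)          # 10 microsecond trigger pulse
    GPIO.output(TRIG, False)
    while GPIO.input(ECHO) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    # sound travels ~34300 cm/s; halve it for the round trip
    return round(pulse_duration * 17150, 2)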
import json
from os.path import join, dirname
from watson_developer_cloud import VisualRecognitionV3

test_url = 'https://www.ibm.com/ibm/ginni/images' \
           '/ginni_bio_780x981_v4_03162016.jpg'

visual_recognition = VisualRecognitionV3('2016-05-20', api_key='YOUR API KEY')

# with open(join(dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \
#         open(join(dirname(__file__), '../resources/trucks.zip'), 'rb') as trucks:
#     print(json.dumps(visual_recognition.create_classifier('Cars vs Trucks',
#                                                           cars_positive_examples=cars,
#                                                           negative_examples=trucks), indent=2))

car_path = join(dirname(__file__), '../resources/car.jpg')
with open(car_path, 'rb') as image_file:
    car_results = visual_recognition.classify(
        images_file=image_file,
        threshold=0.1,
        classifier_ids=['CarsvsTrucks_1479118188', 'default'])
print(json.dumps(car_results, indent=2))

# print(json.dumps(visual_recognition.get_classifier('YOUR CLASSIFIER ID'),
#                  indent=2))

# with open(join(dirname(__file__), '../resources/car.jpg'), 'rb') as image_file:
#     print(json.dumps(visual_recognition.update_classifier(
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='6c9bd2eebc2ca826e9ef864eb8934fdd4c5d259a')

classifiers = visual_recognition.list_classifiers(verbose=True)
print(json.dumps(classifiers, indent=2))
# !!!!!!! NOTE: this service currently doesn't work since the free trial expired on the day of thesis submission
import json
from os.path import join, dirname
from watson_developer_cloud import VisualRecognitionV3
import sys

imagefile = str(sys.argv[1])

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='93bc4d7f17a8d0bd011394b97e14b8be369b11ee')

with open(imagefile, 'rb') as img:
    result = visual_recognition.classify(images_file=img, threshold=0.75)

jsonresult = json.dumps(result, indent=2)
jsonarr = json.loads(jsonresult)

# walk the classes until an IndexError marks the end of the list
i = 0
while True:
    try:
        label = jsonarr["images"][0]["classifiers"][0]["classes"][i]["class"]
        score = jsonarr["images"][0]["classifiers"][0]["classes"][i]["score"]
        print(label + ":" + str(score))
        i = i + 1
    except IndexError:
        break
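# The json.dumps()/json.loads() round trip and the IndexError-driven loop
# above can be replaced by iterating the result dict directly -- a small
# sketch, assuming the same response shape:
for cls in result["images"][0]["classifiers"][0]["classes"]:
    print(cls["class"] + ":" + str(cls["score"]))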
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
from io import StringIO
from PIL import Image
from watson_developer_cloud import VisualRecognitionV3
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# Replace with your api key
visual_recognition = VisualRecognitionV3('2016-05-20',
                                         api_key='INSERT_API_KEY_HERE')

MAX_NUMBER_OF_BOXES = 10
MINIMUM_CONFIDENCE = 0.6
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'b', 'w']

# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

print('Downloading model... (This may take over 5 minutes)')
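# A sketch of the download-and-extract step that the message above announces,
# following the common TensorFlow object-detection tutorial pattern; the
# original code is cut off here, so this continuation is an assumption.
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
with tarfile.open(MODEL_FILE) as tar_file:
    for member in tar_file.getmembers():
        # keep only the frozen graph from the archive
        if 'frozen_inference_graph.pb' in member.name:
            tar_file.extract(member, '.')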
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3
from face import show_webcam

test_url = 'http://www.talentedprofiles.com/wp-content/uploads/2016/10/25906514-600x600_t.png'

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='aa3972acdbfb2a4df1cbbab7aaab2686755da530')

show_webcam()

# print(json.dumps(visual_recognition.create_collection("testcollect"), indent=2))
# with open(join(dirname(__file__), '../FullSizeRender.jpg'), 'rb') as image_file:
#     print(json.dumps(visual_recognition.add_image("testcollect_baf4a0", image_file,
#                                                   {'name': 'Johnny Wu'}), indent=2))

ok = input("Please input your name")
print("The image added should be in the resources folder")
with open(join(dirname(__file__), '../TartanHacks_S17/resources/vince.png'),
          'rb') as image_file:
    print(json.dumps(
        visual_recognition.add_image("testcollect_baf4a0", image_file,
                                     {'name': ok}), indent=2))
from watson_developer_cloud import VisualRecognitionV3

def main(params):
    visual_recognition = VisualRecognitionV3('2016-05-20',
                                             api_key=params["api_key"],
                                             url=params["url"])
    return visual_recognition.classify(images_url=params["image_url"])
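# A hypothetical local invocation of the action above; every value in the
# params dict is a placeholder, not taken from the original source.
if __name__ == "__main__":
    print(main({
        "api_key": "YOUR_API_KEY",
        "url": "https://gateway-a.watsonplatform.net/visual-recognition/api",
        "image_url": "https://example.com/photo.jpg",
    }))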
from SimpleCV import Color, Camera, Display
import json
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='30a8e58eb3163f8319ebea84ecd1b1b0027b6038')

cam = Camera()
img = cam.getImage()
img.save("/home/pi/tamara/snap.jpg")

with open(join(dirname(__file__), './snap.jpg'), 'rb') as image_file:
    print(json.dumps(
        visual_recognition.classify(images_file=image_file,
                                    classifier_ids=[
                                        'greenpepper_194357852',
                                        'orange_2010321797',
                                        'banana_760231312',
                                        'default'
                                    ]),
        indent=2))
# for IBM IoT platform
import time
import sys
import ibmiotf.application
import ibmiotf.device
import random

# importing web browser to prepare for simulation
import webbrowser

# visual recognition
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='KL11qNCtdmrm3mpty13OfABOvu2IVCIMHl2xV06fnFCm')

# Provide your IBM Watson Device Credentials
organization = "61f75s"
deviceType = "raspberrypi"
deviceId = "123456"
authMethod = "token"
authToken = "1234567890"

# Initialize GPIO
def myCommandCallback(cmd):
    print("Command received: %s" % cmd.data)
    # print(type(cmd.data))
    i = cmd.data['cmd']
    if i == 'alert':
        pass  # the handler body is cut off in the original source
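# A sketch of how these credentials would typically be used with the ibmiotf
# device client to connect and register the callback above; this wiring is an
# assumption following the standard ibmiotf pattern, not the original code.
deviceOptions = {
    "org": organization,
    "type": deviceType,
    "id": deviceId,
    "auth-method": authMethod,
    "auth-token": authToken,
}
deviceCli = ibmiotf.device.Client(deviceOptions)
deviceCli.connect()
deviceCli.commandCallback = myCommandCallback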
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2019-05-7',
    iam_apikey='L9k7HYkIssnQ-rrT7_oEWQUJAbyyzKSPv0n_VKfKA-M8')

# Contains the link to the IPWebcam stream
test_url = 'http://192.168.1.8:8080/shot.jpg'

# with an IAM-authenticated client the url is passed as a keyword argument and
# the response unwrapped with get_result()
faces = visual_recognition.detect_faces(url=test_url).get_result()
print(json.dumps(faces, indent=2))
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='*************************************')

with open('./fruitbowl.jpg', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        classifier_ids=["default"]).get_result()
print(json.dumps(classes, indent=2))
# usage: python delete_classifier.py --k {0,1} --id classifier_id
import sys
import json, argparse
from watson_developer_cloud import VisualRecognitionV3

parser = argparse.ArgumentParser(
    description='Send Data for Modeling and Validation')
parser.add_argument('--k', help='1 to use the IBM account', type=int, required=True)
parser.add_argument('--id', help='classifier ID you wish to delete', type=str, required=True)
args = parser.parse_args()

free_key = '988d558c4a7e45a98f2aa9f1d52a66d5be30287d'
IBM_key = '2dc79bad5c8e2677012abe8fbff37d296cec070c'
if args.k == 1:
    key = IBM_key
else:
    key = free_key

visual_recognition = VisualRecognitionV3('2016-05-20', api_key=key)
response = visual_recognition.delete_classifier(classifier_id=args.id)
print(json.dumps(response, indent=2))
import cognitive_face as CF
import sys
import json
from watson_developer_cloud import VisualRecognitionV3

# IBM and Azure keys
ibm_key = 'u-wtKNFlyFRyCLQbTqr05wBPeZ3H7h-ZuY3hCC3a1NSc'
azure_key = '8943a700f3ca47d7bdd126b59a27f2d9'
sys.path.append('./')

# IBM: detects age and gender
# (detect_faces takes no threshold/classifier_ids arguments; those belong to classify)
visual_recognition = VisualRecognitionV3('2018-03-19', iam_apikey=ibm_key)
with open('./foto.jpg', 'rb') as images_file:
    classes = visual_recognition.detect_faces(images_file).get_result()

print('==========IBM: detects age and gender==========')
print(json.dumps(classes, indent=2))
print('================================================\n\n')

# Azure: face location
CF.Key.set(azure_key)  # Replace with a valid subscription key (keeping the quotes in place).
BASE_URL = 'https://centralus.api.cognitive.microsoft.com/face/v1.0/'  # Replace with your regional Base URL
CF.BaseUrl.set(BASE_URL)

# You can use this example JPG or replace the URL below with your own URL to a JPEG image.
img_url = './foto.jpg'
faces = CF.face.detect(img_url)
print('==============Azure: face location=============')
import json
from io import BytesIO
from watson_developer_cloud import VisualRecognitionV3
from Xlib import display, X
from PIL import Image  # PIL
import time

print("HE")
visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    api_key='3995db77bd70aa498157544096c84c5741b30668')

W, H = 800, 650
dsp = display.Display()
root = dsp.screen().root

x = 0
while x < 2:
    raw = root.get_image(200, 200, W, H, X.ZPixmap, 0xffffffff)
    image = Image.frombytes("RGB", (W, H), raw.data, "raw", "BGRX")
    time.sleep(.5)
    # classify() expects file-like bytes, so encode the PIL image first
    buf = BytesIO()
    image.save(buf, format='JPEG')
    buf.seek(0)
    classes = visual_recognition.classify(buf,
                                          threshold='0.6',
                                          classifier_ids='humans_1546902740')
    x += 1
    print(json.dumps(classes, indent=2))
import json
from watson_developer_cloud import VisualRecognitionV3
import cv2
import os

# connecting to the IBM Visual Recognition instance; insert the version and API key respectively below
visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey=os.environ.get('IBMBETHKEY'))

# uncomment this if you already have a video file
# vidcap = cv2.VideoCapture('output.avi')
# otherwise capture from the video cam
cap = cv2.VideoCapture(0)
vidcap = cap

# loop through the video while still recording:
# ret, frame = cap.read()
# # Display the resulting frame
# cv2.imshow('black and white', frame)

# read the frame at a given timestamp and save it as a JPG file
def getFrame(sec, count):
    vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
    hasFrames, image = vidcap.read()
    if hasFrames:
        cv2.imwrite("image" + str(count) + ".jpg", image)  # save frame as JPG file
    return hasFrames
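# A sketch of the driver loop that usually accompanies a getFrame() helper
# like the one above; the sampling rate is an assumption, since the original
# file is cut off at this point.
sec = 0
count = 0
while getFrame(sec, count):
    count += 1
    sec = round(sec + 0.5, 2)  # sample a frame every half second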
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'AvO.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!

import json
import os

import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QImage, QPalette, QBrush
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3('2018-03-19',
                                         iam_apikey='Enter_your_apikey')

class Ui_MainWindow(QtWidgets.QMainWindow):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(0, 0, 791, 571))
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='GfBGcjYPrd-38OXXTbV1uPjKZ5Xp4o-20av0onxqqUIF')

# url = 'https://watson-developer-cloud.github.io/doc-tutorial-downloads/visual-recognition/640px-IBM_VGA_90X8941_on_PS55.jpg'
url = "https://upload.wikimedia.org/wikipedia/commons/thumb/5/5d/Leopard_2_A7.JPG/220px-Leopard_2_A7.JPG"

classes_result = visual_recognition.classify(url=url).get_result()
print(json.dumps(classes_result, indent=2))

# find the index of the highest-scoring class
classes = classes_result["images"][0]["classifiers"][0]["classes"]
maxscore, maxind = 0, 0
for i in range(len(classes)):
    if classes[i]["score"] > maxscore:
        maxscore = classes[i]["score"]
        maxind = i
print(classes[maxind])
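# Equivalently, the built-in max() with a key function avoids the manual index
# bookkeeping -- a small sketch over the same response:
best = max(classes_result["images"][0]["classifiers"][0]["classes"],
           key=lambda c: c["score"])
print(best)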
from watson_developer_cloud import VisualRecognitionV3
import json

visual_recognition = VisualRecognitionV3(
    version='2018-03-19',
    iam_apikey='yxu1EPLR_Ry65MXOkWItzzA4VSDhghPSKLt07UIxAD8F')

with open('./banana1.jpg', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        threshold='0.6',
        classifier_ids='DefaultCustomModel_793529682').get_result()
print(json.dumps(classes, indent=2))
import json
from watson_developer_cloud import VisualRecognitionV3

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey='znv5zrsn6pxQsFGAOKtqMw6GJrIYz7XSblKEuN-Kih2d')

with open('./images/images.zip', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        # classifier_ids=["food"]).get_result()
        classifier_ids=["default"]).get_result()

# print(json.dumps(classes, indent=2))
# with open('./response.json', 'w') as outfile:
#     json.dump(classes, outfile)

with open("./response.json", "w") as outfile:
    json.dump(classes, outfile, indent=4, sort_keys=True)
from watson_developer_cloud import VisualRecognitionV3, WatsonApiException
import glob
import cv2
import time

# KEY = open("../apikey.ini").read()

# If service instance provides IAM API key authentication
service = VisualRecognitionV3(
    "2018-03-19",
    # url is optional, and defaults to the URL below.
    url="https://gateway.watsonplatform.net/visual-recognition/api",
    iam_apikey="{apikey}",
)

# take an image path and return the face-detection JSON
def detect_face(face_path):
    try:
        with open(face_path, "rb") as image_file:
            return service.detect_faces(images_file=image_file).get_result()
        # return json.dumps(face_result, indent=2)
    except WatsonApiException as ex:
        print(ex)

# take the recognition JSON and place the cropped images into per-class folders
def trimming(img_path, json_data):
    try:
        # extract the file name
        file_name = img_path.replace("./../image/original/", "")
        image = json_data["images"][0]
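# A sketch of how the face_location coordinates returned by detect_faces()
# map onto an OpenCV crop; crop_faces() is a hypothetical helper, since the
# original trimming() is cut off before this point.
def crop_faces(img_path, json_data):
    img = cv2.imread(img_path)
    for i, face in enumerate(json_data["images"][0].get("faces", [])):
        loc = face["face_location"]  # keys: left, top, width, height
        crop = img[loc["top"]:loc["top"] + loc["height"],
                   loc["left"]:loc["left"] + loc["width"]]
        cv2.imwrite("face_{}.jpg".format(i), crop)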
###############################################
# author: Gilton Bosco
# date: 14 July 2020
# twitter: @giltwizy
###############################################

import os
from dotenv import load_dotenv
import json
from watson_developer_cloud import VisualRecognitionV3

# loading the .env file from the root directory
load_dotenv()

# getting the API key from the .env file
my_api_key = os.getenv('api_key')

# the key must be passed as iam_apikey; positionally the second argument
# would be interpreted as the service url
visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    iam_apikey=my_api_key)

# getting the image from the root directory
with open('./testimage.jpg', 'rb') as images_file:
    classes = visual_recognition.classify(
        images_file,
        threshold='0.6',
        classifier_ids='default').get_result()
print(json.dumps(classes, indent=2))
if counter != 0:
    commentSentiment = total / counter
    sentimentComments[str(submission)] = commentSentiment
else:
    sentimentComments[str(submission)] = 0.5

for key in sentimentComments:
    print(key, sentimentComments[key])
f.close()

objectSent = dict()
objectOccur = dict()
objectIDs = dict()
imageObjects = dict()

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='8d7aced8efa9ce11cca985d203dce5989cc20148')

for key in submissionUrls:
    hashmap = dict()
    listOfClasses = list()
    wholejson = visual_recognition.classify(images_url=submissionUrls[key])
    images = json.dumps(wholejson['images'], indent=2).splitlines()
    print("Identifying objects in " + submissionUrls[key])
    for line in images:
        # pull the class names out of the pretty-printed JSON lines
        if "\"class\":" in line:
            line = line.replace(",", "")
            line = line.replace("\"class\": \"", "")
            line = line.replace("\"", "")
            line = line.strip()
            listOfClasses.append(line)
            # print(line)
            # assumed continuation (the original is cut off here):
            # count how often each class occurs
            if line in objectOccur:
                objectOccur[line] += 1
            else:
                objectOccur[line] = 1
from __future__ import print_function
import json
from os.path import join, dirname
from watson_developer_cloud import VisualRecognitionV3, WatsonApiException

test_url = 'https://www.ibm.com/ibm/ginni/images' \
           '/ginni_bio_780x981_v4_03162016.jpg'

visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key='kjdV1UXXqdNQRfuQG-DC08BgffvabZEZlgBURQdGzMkn')

# with open(join(dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \
#         open(join(dirname(__file__), '../resources/trucks.zip'), 'rb') as trucks:
#     print(json.dumps(visual_recognition.create_classifier('Cars vs Trucks',
#                                                           cars_positive_examples=cars,
#                                                           negative_examples=trucks), indent=2))

car_path = join(dirname(__file__), '../resources/cars.zip')
with open(car_path, 'rb') as images_file:
    parameters = json.dumps({'threshold': 0.1, 'classifier_ids': ['default']})
    car_results = visual_recognition.classify(images_file=images_file,
                                              parameters=parameters)
print(json.dumps(car_results, indent=2))

# Example with no deprecated parameters
try:
    with open(car_path, 'rb') as images_file:
        car_results = visual_recognition.classify(images_file=images_file,
                                                  threshold='0.1',
                                                  # assumed completion -- the original is cut off mid-call
                                                  classifier_ids=['default'])
        print(json.dumps(car_results, indent=2))
except WatsonApiException as ex:
    print(ex)
from __future__ import print_function
import json
from os.path import abspath
from watson_developer_cloud import VisualRecognitionV3, WatsonApiException
import os

visual_recognition = VisualRecognitionV3(
    '2018-03-19',
    url='https://gateway.watsonplatform.net/visual-recognition/api',
    iam_apikey='_rTi9ExzLh2F_cNt6NRksfQz_sAJ7NhdlXchff5poiF0')

def visRec():
    clothes = []
    filelist = os.listdir("assets/images/")
    thinking = 0  # print "..." every few files as a progress indicator
    for i in filelist:
        if thinking == 5:
            print("...")
            thinking = 0
        else:
            thinking += 1
        if i.endswith(".jpg") or i.endswith(".jpeg"):
            with open("assets/images/" + i, 'rb') as images_file:
                classes = visual_recognition.classify(
                    images_file,
                    threshold='0.5',
                    classifier_ids='DefaultCustomModel_2095219532').get_result()
                thisItem = classes['images'][0].get('classifiers')[0].get('classes')
                if len(thisItem) > 1:
                    pass  # the original source is cut off at this point
import json
from watson_developer_cloud import VisualRecognitionV3

# the key below is IAM-format, so it is passed as iam_apikey rather than the
# legacy api_key parameter
visual_recognition = VisualRecognitionV3(
    version='2018-09-16',
    iam_apikey='rOzTu6mBpvPgKP96UCVUZ-1m_4CrklQXUqRK1_HkbxXR')

with open('./fruitbowl.jpg', 'rb') as images_file:
    classes = visual_recognition.classify(images_file,
                                          threshold='0.6',
                                          classifier_ids='default').get_result()
print(json.dumps(classes, indent=2))