Example #1
 def __init__(self, frame, api_key, model_id):
     self.frame = frame
     self.api_key = api_key
     self.model_id = model_id
     self.authenticator = IAMAuthenticator(self.api_key)
     self.visual_recognition = VisualRecognitionV4(
         version='2019-02-11', authenticator=self.authenticator)
     self.visual_recognition.set_service_url(
         # base URL only: the SDK appends /v4/analyze itself, so the
         # original '/v4/analyze?' suffix would produce a broken endpoint
         'https://gateway.watsonplatform.net/visual-recognition/api')
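Only __init__ appears in the listing; a hedged sketch of how the class might be used (the class name Detector and the argument values are assumptions):

# Hypothetical usage of the constructor above.
detector = Detector(frame='frame.jpg',
                    api_key='YOUR_APIKEY',
                    model_id='my-model-id')
# detector.visual_recognition is now an authenticated V4 client,
# ready for detector.visual_recognition.analyze(...) calls.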
Example #2
import json

from flask import Response
from ibm_watson import VisualRecognitionV4
from ibm_watson.visual_recognition_v4 import AnalyzeEnums, FileWithMetadata
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator


def get_ine():
    apikey = 'i6Ze6gqvd1XrPj624Z-BGbejpQosiVKjnKaW-YVsM1cf'
    url = 'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/a48f0673-5423-47cf-8295-d5b52a90921c'
    collection = '031fbbd1-cc79-4cf0-ac3c-e5ffd0c25cdc'

    authenticator = IAMAuthenticator(apikey)
    service = VisualRecognitionV4('2021-04-29', authenticator=authenticator)
    service.set_service_url(url)

    path = '././images/ine_ejemplo.jpg'

    with open(path, 'rb') as ine_img:
        analyze_images = service.analyze(
            collection_ids=[collection],
            features=[AnalyzeEnums.Features.OBJECTS.value],
            images_file=[FileWithMetadata(ine_img)]).get_result()

    print(analyze_images)
    response = json.dumps(analyze_images)
    return Response(response, mimetype='application/json')
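Since get_ine() returns a flask Response, it is presumably registered as a view; a minimal sketch of that wiring (the app setup and route path are assumptions, not shown in the listing):

from flask import Flask

app = Flask(__name__)

# Hypothetical route; the listing does not show how get_ine() is mounted.
app.add_url_rule('/ine', view_func=get_ine)

if __name__ == '__main__':
    app.run()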
Example #3
from collections import Counter

from ibm_watson import VisualRecognitionV3, VisualRecognitionV4
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
"""
    System parameters
"""
version_name = '2020-03-20'
Watson_API_key = 'UNj_AQojzpzeg5q5FvmaMu2a3jathuqDB79_DjpAGJb_'

# Watson Visual Recognition URL:
VR_service_url = 'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/de0d73f8-faaa-4318-9ed6-492a2822bf20'

# Different ID for each Watson service:
face_detection_CID = "0105d854-d33a-455a-989f-91ce7048982c"
classifier_id = "FaceClassification_746624536"

authenticator = IAMAuthenticator(Watson_API_key)
service_v4 = VisualRecognitionV4(version_name, authenticator=authenticator)
service_v4.set_service_url(VR_service_url)
service_v3 = VisualRecognitionV3(version_name, authenticator=authenticator)


def format_face_coords(ibm_analyze_result):
    """
    Parse the face coords extracted from IBM service_v4.

    :param ibm_analyze_result: the json object directly returned from IBM face detection service_v4
        see an example in "watson_experiment/sample_face_and_result/sample_output.json"
    :return: a list of locations, each of which looks like
                {
                  "left": 64,
                  "top": 72,
                  "width": 124,
Example #4
import json
import os
from ibm_watson import VisualRecognitionV4
from ibm_watson.visual_recognition_v4 import FileWithMetadata, TrainingDataObject, Location, AnalyzeEnums
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator(
    'YOUR APIKEY')
service = VisualRecognitionV4(
    '2018-03-19',
    authenticator=authenticator)
service.set_service_url('https://gateway.watsonplatform.net/visual-recognition/api')

# create a collection
my_collection = service.create_collection(
    name='',
    description='testing for python'
).get_result()
collection_id = my_collection.get('collection_id')

# add images
with open(os.path.join(os.path.dirname(__file__), '../resources/South_Africa_Luca_Galuzzi_2004.jpeg'), 'rb') as giraffe_info:
    add_images_result = service.add_images(
        collection_id,
        images_file=[FileWithMetadata(giraffe_info)],
    ).get_result()
print(json.dumps(add_images_result, indent=2))
image_id = add_images_result.get('images')[0].get('image_id')

# add image training data
training_data = service.add_image_training_data(
    collection_id,
    image_id,
    objects=[
        # the listing truncates here; the object name and bounding box
        # below are illustrative placeholders
        TrainingDataObject(object='giraffe',
                           location=Location(64, 270, 755, 784))
    ]).get_result()
print(json.dumps(training_data, indent=2))
Example #5
import json
from ibm_watson import VisualRecognitionV4
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator(
    '-L_ISbMRp4t_bFoGMrQIeTG2n9Qy3pdd1QmKQxGF3al2')
visual_recognition = VisualRecognitionV4(version='2019-02-11',
                                         authenticator=authenticator)

visual_recognition.set_service_url(
    'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/d9acfb96-cfe0-48b5-9859-e26d687fa469'
)

result = visual_recognition.train(
    collection_id='d7169bfd-fa76-4d4f-b96a-05e56218631b').get_result()
print(json.dumps(result, indent=2))
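train() only starts an asynchronous training job; a hedged sketch of polling until it finishes (the training_status field names follow the v4 Collection schema):

import time

# Poll the collection until object training reports ready.
while True:
    collection = visual_recognition.get_collection(
        collection_id='d7169bfd-fa76-4d4f-b96a-05e56218631b').get_result()
    objects_status = collection.get('training_status', {}).get('objects', {})
    if objects_status.get('ready') and not objects_status.get('in_progress'):
        break
    time.sleep(10)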
Example #6
import cv2
from ibm_watson import VisualRecognitionV4
from ibm_watson.visual_recognition_v4 import AnalyzeEnums, FileWithMetadata
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

cap = cv2.VideoCapture(0)  # assumed: default webcam; used globally below


def camera():
    authenticator = IAMAuthenticator(
        apikey="ZQBgX2TPs2CmC3wNLWGEr7X3xDmq42sDsqTZVCsjdke8")

    flag = False
    obj = VisualRecognitionV4(version='2021-03-25',
                              authenticator=authenticator)

    obj.set_service_url(
        service_url=
        'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/2fa93f7c-36f0-48f9-abb1-15845fbe94e1'
    )

    while True:
        success, img = cap.read()
        cv2.imwrite('4.jpg', img)
        with open('4.jpg', 'rb') as frame_file:
            result = obj.analyze(
                collection_ids=["17951ae7-0169-4551-ac19-a3864c7eed65"],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[FileWithMetadata(frame_file)],
                threshold=0.15).get_result()
            #print(json.dumps(result, indent=2))
        img = cv2.imread('4.jpg')
        try:
            # first detected object in the first (and only) collection
            detected = result['images'][0]['objects']['collections'][0][
                'objects'][0]
            location = detected['location']
            left, top = location['left'], location['top']
            width, height = location['width'], location['height']

            cv2.rectangle(img, (left, top), (left + width, top + height),
                          (255, 0, 0), 2)
            name = detected['object']
            with open('data.txt', 'w') as f:  # 'w', not 'r': the label is written
                f.write(str(name))
            cv2.putText(img, name, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 0, 0), 2)
            cv2.imshow('gasm', img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:  # press 'ESC' to quit
                break

            if name == 'gun' and not flag:
                flag = True  # send the alert mail only once per run
                SendMail()
        except (KeyError, IndexError):
            # no object detected in this frame; just show it unannotated
            cv2.imshow('gasm', img)
    cap.release()
    # cv2.waitKey(0)
    cv2.destroyAllWindows()
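SendMail() is called above but never defined in the listing; a bare-bones sketch using the standard library (addresses, server, and credentials are placeholders):

import smtplib
from email.message import EmailMessage

def SendMail():
    # Placeholder alert mail; fill in real credentials before use.
    msg = EmailMessage()
    msg['Subject'] = 'Alert: gun detected'
    msg['From'] = 'alerts@example.com'
    msg['To'] = 'security@example.com'
    msg.set_content('A gun was detected in the camera feed.')
    with smtplib.SMTP('smtp.example.com', 587) as server:
        server.starttls()
        server.login('alerts@example.com', 'APP_PASSWORD')
        server.send_message(msg)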
Example #7
# coding: utf-8

from ibm_watson import VisualRecognitionV4
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import json
from ibm_watson.visual_recognition_v4 import AnalyzeEnums, FileWithMetadata
import details

"""Authentication step with IBM"""

authenticator = IAMAuthenticator('{your_api_key}')
visual_recognition = VisualRecognitionV4(
    version='2020-06-14',  # version dates use the YYYY-MM-DD format
    authenticator=authenticator
)

visual_recognition.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/ff7b2df2-cfd1-47a5-9bce-778b0661850f')

"""Using IBM Watson Studio to classify image and determine if there is injury"""


"""Threshold set to low since it's safer to assume"""

with open('./frame.jpg', 'rb') as images_file:
    result = visual_recognition.analyze(
        collection_ids=["{collection_id}"],
        features=[AnalyzeEnums.Features.OBJECTS.value],
        images_file=[FileWithMetadata(images_file)],
        threshold='0.5').get_result()

print(json.dumps(result, indent=2))
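A possible follow-up, assuming the custom collection labels injuries with an 'injury' object name (the label is an assumption):

# Flag the frame if any detected object carries the assumed 'injury' label.
detected = [
    obj['object']
    for image in result.get('images', [])
    for coll in image.get('objects', {}).get('collections', [])
    for obj in coll.get('objects', [])
]
if 'injury' in detected:
    print('Possible injury detected in frame.jpg')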