Ejemplo n.º 1
0
    def test_03_analyze(self):
        """Exercise analyze() with two local image files plus one image URL."""
        base_dir = os.path.dirname(__file__)
        dog_path = os.path.join(base_dir, '../../resources/dog.jpg')
        giraffe_path = os.path.join(base_dir, '../../resources/my-giraffe.jpeg')

        # Both files must stay open for the duration of the request.
        with open(dog_path, 'rb') as dog_file, open(giraffe_path, 'rb') as giraffe_file:
            analyze_images = self.visual_recognition.analyze(
                collection_ids=['684777e5-1f2d-40e3-987f-72d36557ef46'],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[
                    FileWithMetadata(dog_file),
                    FileWithMetadata(giraffe_file),
                ],
                image_url=[
                    'https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/American_Eskimo_Dog.jpg/1280px-American_Eskimo_Dog.jpg'
                ]).get_result()
            assert analyze_images is not None
        print(json.dumps(analyze_images, indent=2))
Ejemplo n.º 2
0
def recognize_faces(img_path=None, img_url=None):
    """
    Call Watson Face Recognition model, to recognize faces in an input image.

    Exactly one of ``img_path`` or ``img_url`` must be provided.

    :param img_path: local path of input raw image file. Only one of img_path or img_url should be used.
    :param img_url: http/https url for input raw image. Only one of img_path or img_url should be used.
    :return: List of array, each element is the coordinates of faces, based on which faces can be cropped
    :raises ValueError: if zero or both of img_path and img_url are given
    """

    # BUG FIX: the original used `assert` (stripped under `python -O`) and only
    # *printed* on failure, then carried on with invalid arguments. Raise
    # instead so a bad call can never reach the service.
    if bool(img_path) == bool(img_url):
        raise ValueError(
            "One and only one of the img_path or img_url should be specified")

    if img_path:
        # Local file: stream the raw bytes to the service.
        with open(img_path, 'rb') as img_f:
            analyze_images = service_v4.analyze(
                collection_ids=face_detection_CID,
                features=AnalyzeEnums.Features.OBJECTS.value,
                images_file=[FileWithMetadata(img_f)]).get_result()
    else:
        # Remote image: let the service fetch the URL itself.
        analyze_images = service_v4.analyze(
            collection_ids=face_detection_CID,
            features=AnalyzeEnums.Features.OBJECTS.value,
            image_url=[img_url]).get_result()
    return format_face_coords(analyze_images)
Ejemplo n.º 3
0
 def label_predict(self):
     """Run object detection on self.frame against the configured model
     collection and return the detected objects of the first collection."""
     with open(self.frame, 'rb') as frame_file:
         response = self.visual_recognition.analyze(
             collection_ids=[self.model_id],
             features=[AnalyzeEnums.Features.OBJECTS.value],
             images_file=[FileWithMetadata(frame_file)],
             threshold=0.20).get_result()
         return response['images'][0]['objects']['collections'][0]['objects']
Ejemplo n.º 4
0
def analyze_results(path):
    """Analyze the image at *path* for objects and return the raw result dict.

    Returns None (and prints the offending path plus the error) when the file
    cannot be read or the service call fails.
    """
    try:
        with open(path, 'rb') as image_file:
            result = visual_recognition.analyze(
                collection_ids=[collect_id],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[FileWithMetadata(image_file)],
                threshold=0.25).get_result()
            return result
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt and gave no hint of what went wrong.
    except Exception as err:
        print(path)
        print(err)
        return None
Ejemplo n.º 5
0
def get_ine():
    """Analyze the sample INE image for objects and return the service
    response as a Flask JSON Response."""
    # NOTE(review): credentials are hard-coded here -- they should be moved to
    # environment variables / configuration before this ships.
    apikey = 'i6Ze6gqvd1XrPj624Z-BGbejpQosiVKjnKaW-YVsM1cf'
    service_url = 'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/a48f0673-5423-47cf-8295-d5b52a90921c'
    collection_id = '031fbbd1-cc79-4cf0-ac3c-e5ffd0c25cdc'
    image_path = '././images/ine_ejemplo.jpg'

    service = VisualRecognitionV4(
        '2021-04-29', authenticator=IAMAuthenticator(apikey))
    service.set_service_url(service_url)

    with open(image_path, 'rb') as ine_img:
        analyze_images = service.analyze(
            collection_ids=[collection_id],
            features=[AnalyzeEnums.Features.OBJECTS.value],
            images_file=[FileWithMetadata(ine_img)]).get_result()

    print(analyze_images)
    return Response(json.dumps(analyze_images), mimetype='application/json')
Ejemplo n.º 6
0
    def test_04_training(self):
        """End-to-end training flow: create a collection, add an image,
        attach training data, train, and clean up."""
        # create a collection to train against
        collection = self.visual_recognition.create_collection(
            name='my_test_collection',
            description='testing for python').get_result()
        collection_id = collection.get('collection_id')
        assert collection_id is not None

        # upload a sample image into the collection
        image_path = os.path.join(
            os.path.dirname(__file__),
            '../../resources/South_Africa_Luca_Galuzzi_2004.jpeg')
        with open(image_path, 'rb') as image_file:
            add_images_result = self.visual_recognition.add_images(
                collection_id,
                images_file=[FileWithMetadata(image_file)],
            ).get_result()
        assert add_images_result is not None
        image_id = add_images_result.get('images')[0].get('image_id')
        assert image_id is not None

        # label the image with a bounding box for the object to learn
        training_data = self.visual_recognition.add_image_training_data(
            collection_id,
            image_id,
            objects=[
                TrainingDataObject(object='giraffe training data',
                                   location=Location(64, 270, 755, 784))
            ]).get_result()
        assert training_data is not None

        # kick off training and confirm a status is reported
        train_result = self.visual_recognition.train(
            collection_id).get_result()
        assert train_result is not None
        assert train_result.get('training_status') is not None

        # clean up the collection created above
        self.visual_recognition.delete_collection(collection_id)
Ejemplo n.º 7
0
    '2018-03-19',
    authenticator=authenticator)
# Point the client at the Visual Recognition service endpoint.
service.set_service_url('https://gateway.watsonplatform.net/visual-recognition/api')

# create a classifier
# NOTE(review): the collection name is empty -- presumably a placeholder;
# confirm whether a real name should be supplied here.
my_collection = service.create_collection(
    name='',
    description='testing for python'
).get_result()
collection_id = my_collection.get('collection_id')

# add images
# Upload a sample giraffe photo from the test resources directory.
with open(os.path.join(os.path.dirname(__file__), '../resources/South_Africa_Luca_Galuzzi_2004.jpeg'), 'rb') as giraffe_info:
    add_images_result = service.add_images(
        collection_id,
        images_file=[FileWithMetadata(giraffe_info)],
    ).get_result()
print(json.dumps(add_images_result, indent=2))
image_id = add_images_result.get('images')[0].get('image_id')

# add image training data
# Label the uploaded image with a bounding box for the object to learn.
training_data = service.add_image_training_data(
    collection_id,
    image_id,
    objects=[
        TrainingDataObject(object='giraffe training data',
                           location=Location(64, 270, 755, 784))
    ]).get_result()

# train collection
train_result = service.train(collection_id).get_result()
    def test_04_objects_and_training(self):
        """Full object-metadata lifecycle: create a collection, add and label
        an image, rename the object, train, and clean everything up."""
        # create a collection to work in
        collection = self.visual_recognition.create_collection(
            name='my_test_collection',
            description='testing for python').get_result()
        collection_id = collection.get('collection_id')
        assert collection_id is not None

        # upload a sample image
        image_path = os.path.join(
            os.path.dirname(__file__),
            '../../resources/South_Africa_Luca_Galuzzi_2004.jpeg')
        with open(image_path, 'rb') as image_file:
            add_images_result = self.visual_recognition.add_images(
                collection_id,
                images_file=[FileWithMetadata(image_file)],
            ).get_result()
        assert add_images_result is not None
        image_id = add_images_result.get('images')[0].get('image_id')
        assert image_id is not None

        # label the image with a bounding box
        training_data = self.visual_recognition.add_image_training_data(
            collection_id,
            image_id,
            objects=[
                TrainingDataObject(object='giraffe training data',
                                   location=Location(64, 270, 755, 784))
            ]).get_result()
        assert training_data is not None

        # list the objects known to the collection
        object_metadata_list = self.visual_recognition.list_object_metadata(
            collection_id=collection_id).get_result()
        assert object_metadata_list is not None

        # rename the first object
        first_object = object_metadata_list.get('objects')[0]
        updated_object_metadata = self.visual_recognition.update_object_metadata(
            collection_id=collection_id,
            object=first_object.get('object'),
            new_object='updated giraffe training data').get_result()
        assert updated_object_metadata is not None

        # fetch it back under the new name
        object_metadata = self.visual_recognition.get_object_metadata(
            collection_id=collection_id,
            object='updated giraffe training data',
        ).get_result()
        assert object_metadata is not None
        assert object_metadata.get('object') == 'updated giraffe training data'

        # train the collection and confirm a status comes back
        train_result = self.visual_recognition.train(
            collection_id).get_result()
        assert train_result is not None
        assert train_result.get('training_status') is not None

        # query training usage over a fixed window
        training_usage = self.visual_recognition.get_training_usage(
            start_time='2019-11-01', end_time='2019-11-27').get_result()
        assert training_usage is not None

        # clean up: remove the object, then the whole collection
        self.visual_recognition.delete_object(
            collection_id, object='updated giraffe training data')
        self.visual_recognition.delete_collection(collection_id)
    def test_analyze(self):
        """Unit test for analyze() against a mocked /v4/analyze endpoint
        (HTTP traffic is intercepted by the `responses` library)."""
        endpoint = '/v4/analyze'
        url = '{0}{1}'.format(base_url, endpoint)
        # Canned service reply: two analyzed images, each carrying two
        # collections of detected objects, plus error/warning structures,
        # source/dimension metadata, and a trace id.
        response = {
            "images": [{
                "objects": {
                    "collections": [{
                        "collection_id":
                        "collection_id",
                        "objects": [{
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }, {
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }]
                    }, {
                        "collection_id":
                        "collection_id",
                        "objects": [{
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }, {
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }]
                    }]
                },
                "source": {
                    "archive_filename": "archive_filename",
                    "filename": "filename",
                    "type": "file",
                    "resolved_url": "resolved_url",
                    "source_url": "source_url"
                },
                "errors": {
                    "code": "invalid_field",
                    "message":
                    "The date provided for `version` is not valid. Specify dates in `YYYY-MM-DD` format.",
                    "more_info":
                    "https://cloud.ibm.com/apidocs/visual-recognition-v4#versioning",
                    "target": {
                        "type": "parameter",
                        "name": "version"
                    }
                },
                "dimensions": {
                    "width": 6,
                    "height": 0
                }
            }, {
                "objects": {
                    "collections": [{
                        "collection_id":
                        "collection_id",
                        "objects": [{
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }, {
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }]
                    }, {
                        "collection_id":
                        "collection_id",
                        "objects": [{
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }, {
                            "score": 7.0614014,
                            "location": {
                                "top": 1,
                                "left": 5,
                                "width": 5,
                                "height": 2
                            },
                            "object": "object"
                        }]
                    }]
                },
                "source": {
                    "archive_filename": "archive_filename",
                    "filename": "filename",
                    "type": "file",
                    "resolved_url": "resolved_url",
                    "source_url": "source_url"
                },
                "errors": {
                    "code": "invalid_field",
                    "message":
                    "The date provided for `version` is not valid. Specify dates in `YYYY-MM-DD` format.",
                    "more_info":
                    "https://cloud.ibm.com/apidocs/visual-recognition-v4#versioning",
                    "target": {
                        "type": "parameter",
                        "name": "version"
                    }
                },
                "dimensions": {
                    "width": 6,
                    "height": 0
                }
            }],
            "trace":
            "trace",
            "warnings": [{
                "code": "invalid_field",
                "more_info": "more_info",
                "message": "message"
            }, {
                "code": "invalid_field",
                "more_info": "more_info",
                "message": "message"
            }]
        }
        # Register the canned reply for POST /v4/analyze.
        responses.add(responses.POST,
                      url,
                      body=json.dumps(response),
                      status=200,
                      content_type='application/json')

        authenticator = IAMAuthenticator('bogusapikey')
        service = ibm_watson.VisualRecognitionV4('YYYY-MM-DD',
                                                 authenticator=authenticator)
        service.set_service_url(base_url)

        # Send a local zip plus a remote URL in a single analyze request.
        with open(
                os.path.join(os.path.dirname(__file__),
                             '../../resources/cars.zip'), 'rb') as cars:
            detailed_response = service.analyze(
                # NOTE(review): this is ONE list element containing a comma --
                # presumably meant to be two separate ids
                # ['collection_id1', 'collection_id2']; confirm intent.
                collection_ids=['collection_id1, collection_id2'],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[FileWithMetadata(cars)],
                image_url=[
                    'https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/American_Eskimo_Dog.jpg/1280px-American_Eskimo_Dog.jpg'
                ],
                threshold='0.2')
        result = detailed_response.get_result()
        assert result is not None
        # NOTE(review): two recorded calls are expected -- presumably the IAM
        # token request plus the analyze POST; verify against the test fixtures.
        assert len(responses.calls) == 2
Ejemplo n.º 10
0
import json
from ibm_watson import VisualRecognitionV4
from ibm_watson.visual_recognition_v4 import FileWithMetadata, AnalyzeEnums
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials -- fill these in before running the example.
apikey = 'YOUR API KEY HERE'
url = 'YOUR URL HERE'
collection = 'YOUR COLLECTION HERE'

# Build an authenticated Visual Recognition v4 client.
authenticator = IAMAuthenticator(apikey)
service = VisualRecognitionV4('2018-03-19', authenticator=authenticator)
service.set_service_url(url)

path = 'Path of your image'

# Analyze the image for objects against the configured collection.
with open(path, 'rb') as mask_img:
    analyze_images = service.analyze(
        collection_ids=[collection],
        features=[AnalyzeEnums.Features.OBJECTS.value],
        images_file=[FileWithMetadata(mask_img)]).get_result()

# Pull the first detected object's label and bounding-box coordinates.
obj = analyze_images['images'][0]['objects']['collections'][0]['objects'][0][
    'object']
coords = analyze_images['images'][0]['objects']['collections'][0]['objects'][
    0]['location']

from matplotlib import pyplot as plt

# NOTE(review): cv2 is used below but never imported in this snippet --
# presumably `import cv2` was lost when the example was extracted; confirm.
cv2.imread(path)
            "language": "english",
            "route": "p",
            "numbers": "8300448682, 9025592443"
        }
        headers = {'cache-control': "no-cache"}
        response = requests.request("GET",
                                    url,
                                    headers=headers,
                                    params=querystring)
        print(response.text)

        with open(picname + '.jpg', 'rb') as img:
            result = visual_recognition.analyze(
                collection_ids=["2db6113b-aaae-4924-b424-a1103669251f"],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[FileWithMetadata(img)],
                threshold='0.6').get_result()

        if (result['images'][0]['objects'] != {}):
            with open('Can_Enter.mp3',
                      'wb') as audio_file:  #wb means write bytes
                audio_file.write(
                    text_to_speech.synthesize(
                        'The Gate will open. You can enter now.',
                        voice='en-US_AllisonVoice',
                        accept='audio/mp3').get_result().content)
            playsound('Can_Enter.mp3')

        else:
            with open('Cannot_Enter.mp3',
                      'wb') as audio_file:  #wb means write bytes
Ejemplo n.º 12
0
def camera():
    """Continuously capture frames from the global `cap` camera, run object
    detection on each frame, draw the best detection, and send a one-time
    alert mail when a gun is detected. Press ESC in the preview window to
    stop; releases the camera and closes all windows on exit."""
    authenticator = IAMAuthenticator(
        apikey="ZQBgX2TPs2CmC3wNLWGEr7X3xDmq42sDsqTZVCsjdke8")

    mail_sent = False  # ensures SendMail() fires at most once per session
    service = VisualRecognitionV4(version='2021-03-25',
                                  authenticator=authenticator)
    service.set_service_url(
        service_url=
        'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/2fa93f7c-36f0-48f9-abb1-15845fbe94e1'
    )

    while True:
        success, img = cap.read()
        # The service needs a file, so round-trip the frame through disk.
        cv2.imwrite('4.jpg', img)
        # The original also opened a second, unused handle on the same file
        # (`dice_file`); one handle is enough.
        with open('4.jpg', 'rb') as frame_file:
            result = service.analyze(
                collection_ids=[
                    "17951ae7-0169-4551-ac19-a3864c7eed65",
                    '17951ae7-0169-4551-ac19-a3864c7eed65'
                ],
                features=[AnalyzeEnums.Features.OBJECTS.value],
                images_file=[
                    FileWithMetadata(frame_file),
                ],
                threshold=0.15).get_result()
        img = cv2.imread('4.jpg')
        try:
            # First object of the first collection of the first image.
            detection = result['images'][0]['objects']['collections'][0][
                'objects'][0]
            box = detection['location']
            left, top = box['left'], box['top']
            width, height = box['width'], box['height']
            name = detection['object']

            cv2.rectangle(img, (left, top), (left + width, top + height),
                          (255, 0, 0), 2)
            # BUG FIX: the original opened data.txt with mode 'r' and then
            # wrote to it, raising io.UnsupportedOperation -- which the old
            # bare `except:` silently swallowed, so the annotation and alert
            # code below never ran. Append so detections accumulate.
            with open('data.txt', 'a') as f:
                f.write(str(name))
            cv2.putText(img, name, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 0, 0), 2)
            cv2.imshow('gasm', img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:  # press 'ESC' to quit
                break
                # (removed unreachable `cv2.show(...)` that followed the break)

            if name == 'gun' and not mail_sent:
                mail_sent = True
                SendMail()
        except (KeyError, IndexError):
            # No detection in this frame -- show it unannotated.
            cv2.imshow('gasm', img)
    cap.release()
    cv2.destroyAllWindows()