# </snippet_describe>
print()
'''
END - Describe an Image - remote
'''
'''
Categorize an Image -  local
This example extracts categories from a local image with a confidence score
'''
print("===== Categorize an Image - local =====")
# Open local image file
local_image = open(local_image_path, "rb")
# Select visual feature type(s)
local_image_features = ["categories"]
# Call API
categorize_results_local = computervision_client.analyze_image_in_stream(
    local_image, local_image_features)

# Print category results with confidence score
print("Categories from local image: ")
if (len(categorize_results_local.categories) == 0):
    print("No categories detected.")
else:
    for category in categorize_results_local.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
print()
'''
END - Categorize an Image - local
'''

# <snippet_categorize>

'''
Analyze Image - local
This example detects objects in a local image, draws bounding boxes around them, and prints the image's tags.
'''
print("===== Analyze Image - local =====")
print()
# Get local image with different objects in it
local_image_objects = open(local_image_path, "rb")
# Open the same image with PIL so we can draw bounding boxes on it
image_l = Image.open(local_image_path)
draw = ImageDraw.Draw(image_l)

# Select the visual features to extract; the helper calls below read objects and tags
image_features = ["objects", "tags"]
# Call API with the local image
results_local = computervision_client.analyze_image_in_stream(
    local_image_objects, image_features)
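
# getObjects and getTags are not defined in this snippet; minimal sketches,
# assuming the analysis above requested the "objects" and "tags" features:
def getObjects(results, draw):
    # Draw a labelled bounding box around each detected object
    for obj in results.objects:
        r = obj.rectangle
        draw.rectangle([(r.x, r.y), (r.x + r.w, r.y + r.h)], outline="red", width=2)
        draw.text((r.x, r.y), obj.object_property, fill="red")

def getTags(results):
    # Print each tag with its confidence score
    print("Tags in the local image: ")
    for tag in results.tags:
        print("'{}' with confidence {:.2f}%".format(tag.name, tag.confidence * 100))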

# Show bounding boxes around objects
getObjects(results_local, draw)
# Print tags from image
getTags(results_local)

# Display the image in the user's default image viewer.
image_l.show()
print()
'''
Detect Objects - remote
This example detects different kinds of objects with bounding boxes in a remote image.
'''
print("===== Analyze Image - remote =====")
print()
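# The remote call itself is missing from this snippet; a minimal sketch,
# assuming a publicly reachable image URL (hypothetical) and the same client:
remote_image_url_objects = "https://example.com/objects.jpg"  # hypothetical URL
detect_objects_results_remote = computervision_client.analyze_image(
    remote_image_url_objects, ["objects"])

print("Detecting objects in remote image:")
if (len(detect_objects_results_remote.objects) == 0):
    print("No objects detected.")
else:
    for obj in detect_objects_results_remote.objects:
        r = obj.rectangle
        print("object at location {}, {}, {}, {}".format(
            r.x, r.x + r.w, r.y, r.y + r.h))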
Example #3
# Assumes VISION_API_REGION, VISION_API_KEY, and RAW_MODE are defined earlier
# (for example, read from environment variables).
client = ComputerVisionClient(
    endpoint=f"https://{VISION_API_REGION}.api.cognitive.microsoft.com/",
    credentials=CognitiveServicesCredentials(VISION_API_KEY))

print(f"### Using API endpoint: {client.config.endpoint}")

# Open Image
img_file = sys.argv[1]
print(f"### Analyzing {img_file}")
image_file = open(img_file, "rb")

# VisualFeatureTypes can include other features to detect, check the docs
result = client.analyze_image_in_stream(image_file,
                                        raw=RAW_MODE,
                                        visual_features=[
                                            VisualFeatureTypes.description,
                                            VisualFeatureTypes.tags,
                                            VisualFeatureTypes.color,
                                            VisualFeatureTypes.faces
                                        ])

if (RAW_MODE):
    print(json.dumps(result.response.json(), indent=2))
    exit(0)

# Print description
print(f"###\n### That looks like: {result.description.captions[0].text}\n###")

# Print tags
print("### Tags:")
for tag in result.tags:
    print(f" - {tag.name} {tag.confidence:.2f}")
Example #4
endpoint = ""

computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key))

# Open the default webcam (assumption: the original defines mainCamera elsewhere)
mainCamera = cv2.VideoCapture(0)

while True:

    ret, frame = mainCamera.read()
    if not ret:
        break

    # Select visual feature type(s)
    image_features = ["objects"]

    # Encode the frame as JPEG and wrap the bytes in a stream for the API
    encodedframe = cv2.imencode(".jpg", frame)[1].tobytes()
    stream = io.BytesIO(encodedframe)

    r = computervision_client.analyze_image_in_stream(
        stream, visual_features=image_features, language='en')

    # Draw a labelled bounding box for each detected object
    if r is not None and r.objects:
        for obj in r.objects:
            rect = obj.rectangle
            cv2.rectangle(frame, (rect.x, rect.y),
                          (rect.x + rect.w, rect.y + rect.h), (0, 255, 0), 2)
            cv2.putText(frame, obj.object_property, (rect.x + 10, rect.y + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
            cv2.putText(frame, str(round(obj.confidence * 100, 1)) + "%",
                        (rect.x + 10, rect.y + 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    # Show the annotated frame; without an imshow window, waitKey never sees 'q'
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

mainCamera.release()
cv2.destroyAllWindows()
Example #5
import os
import vision
# pip install msrest
from msrest.authentication import CognitiveServicesCredentials
# pip install azure-cognitiveservices-vision-computervision
from azure.cognitiveservices.vision.computervision import ComputerVisionClient

cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'

# Get a client for the computer vision service
computervision_client = ComputerVisionClient(
    cog_endpoint, CognitiveServicesCredentials(cog_key))

# Get the path to an image file
image_path = os.path.join('data', 'iron.jpg')

# Specify the features we want to analyze
features = ['Description', 'Tags', 'Adult', 'Objects', 'Faces']

# Get an analysis from the computer vision service
with open(image_path, "rb") as image_stream:
    analysis = computervision_client.analyze_image_in_stream(
        image_stream, visual_features=features)

# Show the results of analysis (code in vision.py)
vision.show_image_analysis(image_path, analysis)
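
# vision.py is not shown here; a minimal sketch of what its
# show_image_analysis helper might do (hypothetical, for illustration only):
def show_image_analysis(image_path, analysis):
    # Print the top caption, then each tag with its confidence
    if analysis.description.captions:
        caption = analysis.description.captions[0]
        print("'{}' with confidence {:.2f}%".format(
            caption.text, caption.confidence * 100))
    for tag in analysis.tags:
        print(" - {} ({:.2f})".format(tag.name, tag.confidence))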
Example #6
# Describe a remote image (continued). Assumes remote_image_description was
# returned by an earlier describe_image call on the remote image URL.
if (len(remote_image_description.captions) == 0):
    print("No description detected.")
else:
    for caption in remote_image_description.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text,
                                                    caption.confidence * 100))
#   END - Describe a remote image

# Categorize a local image by:
#   1. Opening the binary file for reading.
#   2. Defining what to extract from the image by initializing an array of VisualFeatureTypes.
#   3. Calling the Computer Vision service's analyze_image_in_stream with the:
#      - image
#      - features to extract
#   4. Displaying the image categories and their confidence values.
local_image = open(local_image_path, "rb")
local_image_features = ["categories"]
local_image_analysis = computervision_client.analyze_image_in_stream(
    local_image, local_image_features)

print("\nCategories from local image: ")
if (len(local_image_analysis.categories) == 0):
    print("No categories detected.")
else:
    for category in local_image_analysis.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
#   END - Categorize a local image

# Categorize a remote image by:
#   1. Calling the Computer Vision service's analyze_image with the:
#      - image URL
#      - features to extract
#   2. Displaying the image categories and their confidence values.
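# The remote call is cut off at this point; a minimal sketch following the
# steps above, assuming remote_image_url was defined earlier in the sample:
remote_image_features = ["categories"]
remote_image_analysis = computervision_client.analyze_image(
    remote_image_url, remote_image_features)

print("\nCategories from remote image: ")
if (len(remote_image_analysis.categories) == 0):
    print("No categories detected.")
else:
    for category in remote_image_analysis.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
#   END - Categorize a remote image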
Example #7
# Send the provided image (URL or local path) to Azure for analysis.
image_features = ["brands"]  # assumption: brand results are read from analysis.brands below

if is_url(path):
    try:
        headers = {'User-Agent': 'Mozilla/5.0'}
        req = urllib.request.Request(path, headers=headers)

        if urllib.request.urlopen(req).status == 200:
            try:
                analysis = client.analyze_image(path, image_features)
            except Exception as e:
                catch_exception(e, path)

    except urllib.error.URLError:
        sys.exit("Error: The URL does not appear to exist. Please check.\n"
                 f"{path}")
else:
    path = os.path.join(get_cmd_cwd(), path)
    with open(path, 'rb') as fstream:
        try:
            analysis = client.analyze_image_in_stream(fstream, image_features)
        except Exception as e:
            catch_exception(e, path)

for brand in analysis.brands:
    print(f"{brand.rectangle.x} {brand.rectangle.y} " +
          f"{brand.rectangle.x + brand.rectangle.w} " +
          f"{brand.rectangle.y + brand.rectangle.h}," +
          f"{brand.confidence:.2f},{brand.name}")
Example #8
class ContentReviewer():
    def __init__(self):
        if not os.path.isfile(TOKEN_PATH):
            raise FileNotFoundError(f"{TOKEN_PATH} not found!")
        with open(TOKEN_PATH) as file:
            tokens = json.load(file)
            self.discord_token = tokens["discord"]
            self.perspective_key = tokens["perspective"]
            self.azure_key = tokens["azure"]
            self.azure_endpoint = tokens["azure_endpoint"]
        self.computervision_client = ComputerVisionClient(self.azure_endpoint, CognitiveServicesCredentials(self.azure_key))
        # Load model
        self.csam_model = load_model('model.h5')
        self.csam_model.compile(
            loss="binary_crossentropy",
            optimizer="adam",
            metrics=["accuracy"]
        )
        self.hashlists = {
            "csam": open("csam.hashlist", "a+")
        }
        self.hashlists["csam"].seek(0)
        self.hashes = {
            "csam": list(int(line, 16) for line in self.hashlists["csam"])
        }

    def review_text(self, message):
        PERSPECTIVE_URL = 'https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze'

        url = PERSPECTIVE_URL + '?key=' + self.perspective_key
        data_dict = {
            'comment': {
                'text': message.content
            },
            'languages': ['en'],
            'requestedAttributes': {
                'SEVERE_TOXICITY': {},
                'IDENTITY_ATTACK': {},
                'INSULT': {},
                'THREAT': {},
                'TOXICITY': {},
                'SPAM': {},
                'SEXUALLY_EXPLICIT': {},
                'FLIRTATION': {}
            },
            'doNotStore': True
        }
        response = requests.post(url, data=json.dumps(data_dict))
        response_dict = response.json()

        scores = {}
        for attr in response_dict["attributeScores"]:
            scores[attr] = response_dict["attributeScores"][attr]["summaryScore"]["value"]

        return scores

    async def review_images(self, message, as_array=False):
        scores_list = []
        for attachment in message.attachments:
            if not attachment.height:
                # Non-image attachments will have no height and should be skipped
                scores_list.append({"GORE": 0, "ADULT": 0, "RACY": 0, "CSAM": 0})
                continue

            scores = {}

            # Download the image to a stream
            file_stream = BytesIO()
            await attachment.save(file_stream, use_cached=True)

            # Turn it into a numpy array via cv2
            file_stream.seek(0)
            arr_img = cv2.imdecode(np.asarray(bytearray(file_stream.read()), dtype=np.uint8), cv2.IMREAD_COLOR)

            # Get a CSAM score for the image
            scores["CSAM"] = self.csam_score(arr_img)

            # Check if this image is in our list of blacklisted hashes
            scores["CSAM_HASH"] = self.hash_compare(arr_img)

            # Use Azure to detect other components (including gory, sexually explicit, and racy images)
            # First seek the image stream back to 0 to be read again
            file_stream.seek(0)
            # Get all the scores mentioned above
            results = self.computervision_client.analyze_image_in_stream(file_stream, ["adult"])
            # Looks for blood and gore to mark as promoting violence or terrorism
            scores["GORE"] = results.adult.gore_score
            # Looks for sexually explicit photos to mark as sexual content
            scores["ADULT"] = results.adult.adult_score
            # Looks for suggestive photos to mark as sexual content with a lower priority
            scores["RACY"] = results.adult.racy_score

            # Add this set of scores to the list to move on to the next attachment
            scores_list.append(scores)
        return scores_list

    def csam_score(self, img):
        # `img` should be a numpy array from cv2
        img = cv2.cvtColor(cv2.resize(img, (IMG_SIZE, IMG_SIZE)), cv2.COLOR_BGR2GRAY)
        img = np.reshape(img, (1, IMG_SIZE, IMG_SIZE, 1 if MODEL_GRAYSCALE else 3))

        return self.csam_model.predict(img)[0][0]

    def hash_compare(self, img):
        # `img` should be a numpy array from cv2
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        # Convert it to a PIL Image
        pil = Image.fromarray(img)
        # Calculate this image's hash
        dhash = int(str(difference_hash(pil, hash_size=HASH_SIZE)), 16)

        # Compare against the known hashes; a Hamming distance of at most 6 counts as a match
        for _hash in self.hashes["csam"]:
            hash_difference = bin(_hash ^ dhash).count("1")
            if hash_difference <= 6:
                # If this is a slightly different image, add its hash to the hashlist so we can detect against it too
                if hash_difference > 0:
                    self.save_hash(img)
                return True
        # Return False if we never found a matching hash
        return False

    def save_hash(self, img):
        # `img` should be a numpy array from cv2
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        pil = Image.fromarray(img)

        # Calculate this image's hash
        dhash = difference_hash(pil, hash_size=HASH_SIZE)

        # Add this hash to our existing in-memory list
        self.hashes["csam"].append(int(str(dhash), 16))
        # Write this hash to the csam.hashlist file for the future
        self.hashlists["csam"].write(str(dhash) + "\n")
        # Flush immediately since we usually don't end up properly close()-ing the file
        self.hashlists["csam"].flush()
print("Has racy content: {} with confidence {:.2f}".format(
    detect_adult_results_remote.adult.is_racy_content,
    detect_adult_results_remote.adult.racy_score * 100))
'''
Detect Adult or Racy Content - local
This example detects adult or racy content in a local image, then prints the adult/racy score.
Scores range from 0.0 to 1.0; smaller values indicate a negative result.
'''
print()
print("===== Detect Adult or Racy Content - local =====")
# Open local file
local_image_path = "<folder\\image.jpg>"
local_image = open(local_image_path, "rb")
# Select visual features you want
local_image_features = ["adult"]
# Call API with local image and features
detect_adult_results_local = computervision_client.analyze_image_in_stream(
    local_image, local_image_features)

# Print results with adult/racy score
print("Analyzing local image for adult or racy content ... ")
print("Is adult content: {} with confidence {:.2f}".format(
    detect_adult_results_local.adult.is_adult_content,
    detect_adult_results_local.adult.adult_score * 100))
print("Has racy content: {} with confidence {:.2f}".format(
    detect_adult_results_local.adult.is_racy_content,
    detect_adult_results_local.adult.racy_score * 100))
print()
'''
END - Detect Adult or Racy Content - local
'''
Example #10
# The head of this snippet is missing; assuming it begins with a URL check as
# in the earlier examples:
if is_url(url):
    request = requests.get(url)
    if request.status_code != 200:
        sys.exit(
            f"Error: The URL does not appear to exist. Please check.\n{url}")
    try:
        analysis = client.analyze_image(
            url, visual_features=[VisualFeatureTypes.tags])
    except Exception as e:
        catch_exception(e, url)

else:
    path = os.path.join(get_cmd_cwd(), url)
    with open(path, 'rb') as fstream:
        # https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/limits-and-quotas
        size = os.path.getsize(path) / 1000000
        if size > 4:
            sys.exit(
                f"The image file is too large for Azure at {size:.2f} MB > 4.0 MB. "
                "Reduce and try again.\n"
                f"{path}\n"
                "For example, use imagemagick's convert command:\n"
                f"$ convert {path} -resize 25% new.jpg")
        try:
            analysis = client.analyze_image_in_stream(
                fstream, visual_features=[VisualFeatureTypes.tags])
        except Exception as e:
            catch_exception(e, path)

for tag in analysis.tags:
    if tag.confidence > 0.2:
        print("{:4.2f},{}".format(round(tag.confidence, 2), tag.name))
Example #11
# Assumes local_path, connection_string, container_name, storage_account,
# computervision_location, and computervision_key are configured globally.
def upload():

    if request.method == 'POST':

        req_file = request.files['file']
        tstamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        local_file_name = tstamp + "-" + req_file.filename
        req_file.save(os.path.join(local_path, local_file_name))
        upfile = os.path.join(local_path, local_file_name)

        # Upload the file to Blob storage
        blob_client = BlobClient.from_connection_string(
            conn_str=connection_string, container_name=container_name,
            blob_name=local_file_name)
        with open(upfile, "rb") as data:
            blob_client.upload_blob(data, blob_type="BlockBlob")

        # Now we send it off for analysis
        client = ComputerVisionClient(
            endpoint="https://" + computervision_location + ".api.cognitive.microsoft.com/",
            credentials=CognitiveServicesCredentials(computervision_key)
        )

        with open(upfile, "rb") as image_stream:
            image_analysis = client.analyze_image_in_stream(
                image=image_stream,
                visual_features=[
                    VisualFeatureTypes.image_type,   # Could use simple str "ImageType"
                    VisualFeatureTypes.faces,        # Could use simple str "Faces"
                    VisualFeatureTypes.categories,   # Could use simple str "Categories"
                    VisualFeatureTypes.objects,      # Could use simple str "Objects"
                    VisualFeatureTypes.tags,         # Could use simple str "Tags"
                    VisualFeatureTypes.description   # Could use simple str "Description"
                ]
            )

        if len(image_analysis.objects) == 0:
            print("No objects detected.")
            objs = "No objects detected"
        else:
            objs = []
            for tag in image_analysis.objects:
                print("'{}' with confidence {:.2f}%".format(
                    tag.object_property, tag.confidence * 100))
                objs.append("'{}' with confidence {:.2f}%".format(
                    tag.object_property, tag.confidence * 100))

        # Initialize desc before the loop so it accumulates every caption and
        # stays bound when there are none
        desc = []
        if len(image_analysis.description.captions) == 0:
            print("No captions detected.")
        else:
            for caption in image_analysis.description.captions:
                tstr = "'{}' with confidence {:.2f}%".format(
                    caption.text, caption.confidence * 100)
                desc.append(tstr)
                print(tstr)

        if len(image_analysis.faces) == 0:
            print("No faces detected.")
            facs = "No faces detected"
        else:
            facs = []
            for face in image_analysis.faces:
                facs.append("Gender: {}  Age: {:d}".format(face.gender, face.age))
            print(facs)

        img = ("https://" + storage_account + ".blob.core.windows.net/"
               + container_name + "/" + local_file_name)

        os.remove(upfile)

    return render_template("upload.html", file=local_file_name,
                           container=container_name, descr=desc, pic=img,
                           object=objs, faces=facs)
Example #12
# Tail of a webcam loop; the enclosing capture code and the Face API call that
# produced detected_faces are not shown in this snippet.
for detected_face in detected_faces:
    Happinessprobability = str(detected_face.face_attributes.emotion.happiness)
    Anger = str(detected_face.face_attributes.emotion.anger)
    # Print every emotion score on one line
    print('anger:{}, '.format(detected_face.face_attributes.emotion.anger), end='')
    print('happiness:{}, '.format(detected_face.face_attributes.emotion.happiness), end='')
    print('contempt:{}, '.format(detected_face.face_attributes.emotion.contempt), end='')
    print('disgust:{}, '.format(detected_face.face_attributes.emotion.disgust), end='')
    print('fear:{}, '.format(detected_face.face_attributes.emotion.fear), end='')
    print('neutral:{}, '.format(detected_face.face_attributes.emotion.neutral), end='')
    print('sadness:{}, '.format(detected_face.face_attributes.emotion.sadness), end='')
    print('surprise:{}'.format(detected_face.face_attributes.emotion.surprise))

# Every 70th frame, send the frame to Computer Vision for tagging and signal
# an attached Arduino over serial when certain tags appear
if iteration % 70 == 0:
    retIMG, bufIMG = cv2.imencode('.jpg', frame)
    streamIMG = io.BytesIO(bufIMG)
    image_analysis = cvclient.analyze_image_in_stream(
        streamIMG, visual_features=[VisualFeatureTypes.tags])
    #customvision_analysis = cuvclient.classify_image("ecb8eaa5-89f3-4666-a8e9-7f340d055b82","Iteration2",frame)
    print("Found this information in frame:")
    for tag in image_analysis.tags:
        print('{},'.format(tag.name), end='')
        if "banana" == tag.name:
            arduino.write(b"B\r\n")
            print("banana")
        if "darth vader" == tag.name:
            arduino.write(b"V\r\n")
            print("darth vader")
        if "glasses" == tag.name or "goggles" == tag.name:
            arduino.write(b"A\r\n")
            print("Terminator")
    print()
iteration += 1