Example No. 1
 @classmethod
 def tearDownClass(cls):
     trainer = CustomVisionTrainingClient(api_key=TRAINING_KEY,
                                          endpoint=ENDPOINT)
     projects = trainer.get_projects()
     for project in projects:
         if project.name.find(PROJECT_PREFIX) == 0:
             trainer.delete_project(project_id=project.id)
Example No. 2
    def test_create_2(self):
        """valid setting will not create project on customvision
        will wait until train for the first time
        """
        trainer = CustomVisionTrainingClient(api_key=TRAINING_KEY,
                                             endpoint=ENDPOINT)
        project_count = len(trainer.get_projects())
        project_obj = Project.objects.create(
            setting=Setting.objects.filter(name='valid_setting').first(),
            camera=Camera.objects.filter(name='demo_camera_1').first(),
            location=Location.objects.filter(name='demo_location_1').first(),
            customvision_project_name=f'{PROJECT_PREFIX}-test_create_2',
            is_demo=False)

        project_count_after = len(trainer.get_projects())
        self.assertEqual(project_obj.customvision_project_id, '')
        self.assertEqual(project_count, project_count_after)
Example No. 3
def find_project():
    try:
        trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
        for proj in trainer.get_projects():
            if (proj.name == SAMPLE_PROJECT_NAME):
                return proj
    except Exception as e:
        print(str(e))
Example No. 4
def find_project():
    # Use the training API to find the SDK sample project created from the training example.
    trainer = CustomVisionTrainingClient(TRAINING_KEY, endpoint=ENDPOINT)

    for proj in trainer.get_projects():
        if (proj.name == PROJECT_NAME):
            return proj

    return False
Example No. 5
def get_project_id(config):
    """
    Get project ID list
    """
    credentials = ApiKeyCredentials(
        in_headers={"Training-key": config["training_key"]})
    trainer = CustomVisionTrainingClient(config["ENDPOINT"], credentials)
    project_id = next(proj.id for proj in trainer.get_projects()
                      if proj.name == config["project_name"])
    return project_id
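A minimal usage sketch for the helper above; the config keys are the ones the function reads, and the values are hypothetical placeholders:

# Placeholder configuration; replace the values with your own resource details.
config = {
    "training_key": "<your training key>",
    "ENDPOINT": "https://<your-resource>.cognitiveservices.azure.com/",
    "project_name": "my-project",
}
print(get_project_id(config))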
Example No. 6
    def tearDown(self, *args, **kwargs):
        trainer = CustomVisionTrainingClient(api_key=self.training_key,
                                             endpoint=self.endpoint)
        projects = trainer.get_projects()
        for project in projects:
            if project.name.find(self.project_prefix) == 0:
                logger.info("Deleting project %s", project.id)
                trainer.delete_project(project_id=project.id)

        super(CustomVisionTestCase, self).tearDown(*args, **kwargs)
Example No. 7
def getPredictionBatch(ENDPOINT, publish_iteration_name, prediction_key,
                       prediction_resource_id, file, training_key,
                       project_name):
    # Now there is a trained endpoint that can be used to make a prediction
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    training_credentials = ApiKeyCredentials(
        in_headers={"Training-key": training_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    trainer = CustomVisionTrainingClient(ENDPOINT, training_credentials)
    projects = trainer.get_projects()

    res_batch = {}
    js_res = {}

    #Retrieve the object detection project and its tags
    #Currently assumes one tag
    for p in projects:
        if p.name == project_name:
            project = trainer.get_project(p.id)
            tags = trainer.get_tags(project.id)
            print('Project Found')

    for url in file:
        info = url.split()

        name = info[0]
        url = info[1]
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException:
            print("error retrieving image: " + url)
            exit(-1)
        # Open the sample image and get back the prediction results.
        results = predictor.detect_image(project.id, publish_iteration_name,
                                         response.content)

        # Collect the results for this image, keyed by tag name.
        js_res = {}
        for prediction in results.predictions:

            x = {
                "confidence": "{0:.2f}%".format(prediction.probability * 100),
                "bbox_left": "{0:.2f}".format(prediction.bounding_box.left),
                "bbox_top": "{0:.2f}".format(prediction.bounding_box.top),
                "bbox_width": "{0:.2f}".format(prediction.bounding_box.width),
                "bbox_height": "{0:.2f}".format(prediction.bounding_box.height)
            }

            x = json.dumps(x)
            js_res.setdefault(prediction.tag_name, []).append(x)
        res_batch[name] = js_res

    return res_batch
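A hedged usage sketch for getPredictionBatch; all values are placeholders, and `file` is assumed to be an iterable of "<name> <url>" strings, which is what the function splits on:

# Placeholder values for illustration only.
batch = getPredictionBatch(
    ENDPOINT="https://<your-resource>.cognitiveservices.azure.com/",
    publish_iteration_name="Iteration1",
    prediction_key="<prediction key>",
    prediction_resource_id="<prediction resource id>",
    file=["car1 https://example.com/car1.jpg"],
    training_key="<training key>",
    project_name="vehicle-detection",
)
print(batch)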
Example No. 8
def fetch_project():
    try:
        print("Get trainer")
        trainer = CustomVisionTrainingClient(trainingKey, endpoint=ENDPOINT)

        print("Get project")
        for proj in trainer.get_projects():
            if proj.name == PROJECT_NAME:
                return proj
    except Exception as e:
        print(str(e))
Example No. 9
    def test_create_2(self):
        """test_create_2.

        Type:
            Positive

        Description:
            Project will not be created on Custom Vision
            until first train or create_project() get
            called.
        """
        trainer = CustomVisionTrainingClient(api_key=self.training_key,
                                             endpoint=self.endpoint)
        project_count = len(trainer.get_projects())
        project_obj = Project.objects.create(
            setting=Setting.objects.filter(name='valid_setting').first(),
            camera=Camera.objects.filter(name='demo_camera_1').first(),
            location=Location.objects.filter(name='demo_location_1').first(),
            customvision_project_name=f'{self.project_prefix}-test_create_2',
            is_demo=False)

        project_count_after = len(trainer.get_projects())
        self.assertEqual(project_obj.customvision_project_id, '')
        self.assertEqual(project_count, project_count_after)
Example No. 10
def find_or_train_project():
    try:
        training_key = "e1ceb0b98f0543de9694c3309060c564" # os.environ[TRAINING_KEY_ENV_NAME]
    except KeyError:
        raise SubscriptionKeyError("You need to set the {} env variable.".format(TRAINING_KEY_ENV_NAME))

    # Use the training API to find the SDK sample project created from the training example.
    from custom_vision_training_samples import train_project, SAMPLE_PROJECT_NAME
    trainer = CustomVisionTrainingClient("e1ceb0b98f0543de9694c3309060c564","https://westeurope.api.cognitive.microsoft.com")

    for proj in trainer.get_projects():
        if (proj.name == SAMPLE_PROJECT_NAME):
            return proj

    # Or, if not found, we will run the training example to create it.
    return train_project(training_key)
Example No. 11
def find_or_train_project():
    try:
        training_key = os.environ[TRAINING_KEY_ENV_NAME]
    except KeyError:
        raise SubscriptionKeyError(
            "You need to set the {} env variable.".format(
                TRAINING_KEY_ENV_NAME))

    # Use the training API to find the SDK sample project created from the training example.
    from custom_vision_training_samples import train_project, SAMPLE_PROJECT_NAME
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    for proj in trainer.get_projects():
        if (proj.name == SAMPLE_PROJECT_NAME):
            return proj

    # Or, if not found, we will run the training example to create it.
    return train_project(training_key)
Example No. 12
def classify(image_path):
    # Replace with a valid key
    prediction_key = "c3741dd758584195b3bcd4331c4ba6c0"
    #prediction_resource_id = "/subscriptions/bad6784e-798e-4bbc-89cf-cb5149375471/resourceGroups/Ecolab(Group)/providers/Microsoft.CognitiveServices/accounts/Ecolab-Prediction"
    training_key = 'd898f79af8f84520994849cf82d4a6be'
    ENDPOINT = "https://ecolab-prediction.cognitiveservices.azure.com/"
    publish_iteration_name = "Ecolab3"
    test_image = image_path
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
    project = trainer.get_projects()[0]

    # Now there is a trained endpoint that can be used to make a prediction

    with open(test_image, "rb") as image_contents:
        results = predictor.classify_image(project.id, publish_iteration_name,
                                           image_contents.read())
        # Display the results.
        for prediction in results.predictions:
            return prediction.tag_name
Example No. 13
def getPrediction(ENDPOINT, publish_iteration_name, prediction_key,
                  prediction_resource_id, img, training_key, project_name):

    # Now there is a trained endpoint that can be used to make a prediction
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    training_credentials = ApiKeyCredentials(
        in_headers={"Training-key": training_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    trainer = CustomVisionTrainingClient(ENDPOINT, training_credentials)
    projects = trainer.get_projects()

    #Retrieve the object detection project and its tags
    #Currently assumes one tag
    for p in projects:
        if p.name == project_name:
            project = trainer.get_project(p.id)
            tags = trainer.get_tags(project.id)
            print('Project Found')

    # Open the sample image and get back the prediction results.
    results = predictor.detect_image(project.id, publish_iteration_name, img)
    drawBounds(img, results.predictions)

    # Display the results.
    js_res = defaultdict(list)
    for prediction in results.predictions:
        print(prediction)

        x = {
            "confidence": "{0:.2f}%".format(prediction.probability * 100),
            "bbox_left": "{0:.2f}".format(prediction.bounding_box.left),
            "bbox_right": "{0:.2f}".format(prediction.bounding_box.top),
            "bbox_width": "{0:.2f}".format(prediction.bounding_box.width),
            "bbox_height": "{0:.2f}".format(prediction.bounding_box.height)
        }

        js_res[prediction.tag_name].append(x)

    return js_res
Example No. 14
    def __init__(self):
        # Load credentials from environment

        # Authenticate the training client
        credentials = ApiKeyCredentials(
            in_headers={"Training-key": TRAINING_KEY})
        trainer = CustomVisionTrainingClient(TRAINING_ENDPOINT, credentials)

        # Authenticate the prediction client
        prediction_credentials = ApiKeyCredentials(
            in_headers={"Prediction-key": PREDICTION_KEY})
        self.predictor = CustomVisionPredictionClient(PREDICTION_ENDPOINT,
                                                      prediction_credentials)

        project_name = "car_seal_train_and_validation"
        self.publish_iteration_name = "Iteration3"
        self.max_byte_size = 4000000

        projects = trainer.get_projects()
        project_id = next((p.id for p in projects if p.name == project_name),
                          None)

        print("Connecting to existing project...")
        self.project = trainer.get_project(project_id)
Example No. 15
class UploadDataset:
    def __init__(self, files_to_upload: list, project_name: str) -> None:
        self.files_to_upload = files_to_upload

        credentials = ApiKeyCredentials(
            in_headers={"Training-key": TRAINING_KEY})
        self.trainer = CustomVisionTrainingClient(TRAINING_ENDPOINT,
                                                  credentials)

        self.project_name = project_name

        self.max_byte_size = 4000000

        self.project: Project = self._connect_to_or_create_project(
            project_name=self.project_name)
        # Make two tags in the new project
        self.green_car_seal_tag = self._get_or_create_tag("green_car_seal")
        self.red_car_seal_tag = self._get_or_create_tag("red_car_seal")
        self.label_to_tag_id = {
            0: self.red_car_seal_tag.id,
            1: self.green_car_seal_tag.id,
        }

    def _connect_to_or_create_project(self, project_name: str) -> Project:
        projects = self.trainer.get_projects()
        project_id = next((p.id for p in projects if p.name == project_name),
                          None)

        if project_id is not None:
            print("Connecting to existing project...")
            return self.trainer.get_project(project_id)

        print("Creating new project...")
        obj_detection_domain = next(
            domain for domain in self.trainer.get_domains()
            if domain.type == "ObjectDetection" and domain.name == "General")
        return self.trainer.create_project(project_name,
                                           domain_id=obj_detection_domain.id)

    def _get_or_create_tag(self, tag_name) -> Tag:
        tags = self.trainer.get_tags(self.project.id)
        for tag in tags:
            if tag.name == tag_name:
                return self.trainer.get_tag(self.project.id, tag.id)

        return self.trainer.create_tag(self.project.id, tag_name)

    def _read_annotation_file(self, annotation_path: str) -> list:
        annotations = []
        with open(annotation_path, "r") as f:

            for line in f:
                line = line.strip()
                parameter_list = line.split(" ")
                label = int(parameter_list[0])
                x, y, w, h = list(map(float, parameter_list[1:]))

                left = x - w / 2
                if left < 0:  # Accounting for previous rounding error
                    left = 0
                top = y - h / 2
                if top < 0:  # Accounting for previous rounding error
                    top = 0

                if left + w > 1:  # Accounting for previous rounding error
                    w = 1 - left
                if top + h > 1:  # Accounting for previous rounding error
                    h = 1 - top

                try:
                    tag_id = self.label_to_tag_id[label]
                except KeyError:
                    raise ValueError(
                        f"Wrong label {label} at {annotation_path}")

                annotations.append(
                    Region(
                        tag_id=tag_id,
                        left=left,
                        top=top,
                        width=w,
                        height=h,
                    ))
        return annotations

    def main(self) -> None:
        dataset_path = os.path.join(os.path.dirname(__file__),
                                    "../../../dataset")

        existing_image_count = self.trainer.get_image_count(
            project_id=self.project.id)
        file_number = existing_image_count
        self.files_to_upload = self.files_to_upload[file_number:]

        for file_name in self.files_to_upload:
            tagged_images_with_regions = []

            annotations: list = self._read_annotation_file(
                annotation_path=os.path.join(dataset_path, "annotations",
                                             file_name + ".txt"), )
            image_bytes: bytes = read_and_resize_image(
                image_path=os.path.join(dataset_path, "images",
                                        file_name + ".JPG"),
                max_byte_size=self.max_byte_size,
            )
            print(f"Image {file_name} is {len(image_bytes)} bytes")
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_bytes,
                                     regions=annotations))
            print("Upload images...")
            upload_result = self.trainer.create_images_from_files(
                self.project.id,
                ImageFileCreateBatch(images=tagged_images_with_regions))
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)
                exit(-1)
            print(
                f"Uploaded file number {file_number+1} of {len(self.files_to_upload)}"
            )
            file_number += 1
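The `_read_annotation_file` method above converts YOLO-style annotations (normalized "label x_center y_center width height") into the left/top origin that Custom Vision regions expect; a standalone sketch of that conversion with a made-up annotation line:

# Illustrative only: one YOLO-style line, all values normalized to [0, 1].
line = "1 0.50 0.40 0.20 0.30"
label, x, y, w, h = line.split()
x, y, w, h = map(float, (x, y, w, h))
left = max(x - w / 2, 0.0)   # 0.40
top = max(y - h / 2, 0.0)    # 0.25
w = min(w, 1.0 - left)       # clamp width so the box stays inside the image
h = min(h, 1.0 - top)        # clamp height the same way
print(label, left, top, w, h)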
Example No. 16
categories = arcpy.GetParameterAsText(1)
trainingkey = arcpy.GetParameterAsText(2)
predictionkey = arcpy.GetParameterAsText(3)
resource_id = arcpy.GetParameterAsText(4)
baseURL = arcpy.GetParameterAsText(5)

whereClause = "1=1"

trainer = CustomVisionTrainingClient(trainingkey, endpoint=baseURL)

categoryList = categories.split(";")

existingProjects = []

#Check existing projects for validity
for project in trainer.get_projects():
    if project.name in categoryList:
        if checkValidProject(trainer, project):
            # Valid project to do predictions with
            existingProjects.append(project.name)
        else:
            # Not a valid project because it's either missing tags or hasn't been trained. Remove the model.
            arcpy.AddMessage("Invalid image detection model '{}' found. Rebuilding the model...".format(project.name))
            trainer.delete_project(project.id)

# Create a new project
for name in categoryList:
    #If the project already exists then no new models will be created
    if name not in existingProjects:
        arcpy.AddMessage("Creating Model {}...".format(name))
        project = trainer.create_project(name)
Example No. 17
class Customvision():
    """Custom Vision 操作クラス

    Azure Custom Vision service を操作するためのクラス。
    config.pyに設定した Custom Vision 接続キーがインスタンスに必要。

    Attributes:
        predictor: CustomVisionPredictionClient クラスのインスタンス
        trainer: CustomVisionTrainingClient クラスのインスタンス
        project_name (str): トレーニング済みの Custom Vision Project の名前
        publish_iteration_name (str): 公開されたイテレーションの名前

    """
    def __init__(self, training_key, prediction_key, endpoint, project_name,\
         publish_iteration_name):
        # Constructor definition
        self.predictor = CustomVisionPredictionClient(prediction_key,
                                                      endpoint=endpoint)
        self.trainer = CustomVisionTrainingClient(training_key,
                                                  endpoint=endpoint)
        self.project_name = project_name
        self.publish_iteration_name = publish_iteration_name

    # Find the specified trained project
    def _get_train_project(self):
        """Find and return the trained Project

        Retrieves the projects created in the portal from the
        CustomVisionTrainingClient instance and checks whether the
        specified project is among them.

        Returns:
            The specified project if it exists; otherwise None.

        """
        ret_project = None
        for project in self.trainer.get_projects():
            if project.name == self.project_name:
                ret_project = project
        return ret_project

    # Return the Custom Vision analysis results for the driving data
    def get_prediction(self, blob_url):
        """Return the Custom Vision analysis results for the driving data

        Finds the Project trained in the Custom Vision portal,
        has it analyze the image at the given Blob container URL,
        and retrieves the analysis results.

        Args:
            blob_url (str): access URL to the Blob container

        Returns:
            The predictions sorted by probability, highest first.
        """
        # Find the specified trained project
        project = self._get_train_project()

        # Start the AI analysis with Custom Vision
        results = self.predictor.classify_image_url(project.id,\
                self.publish_iteration_name, blob_url)

        # Sort by probability, highest first
        predictions = sorted(results.predictions,
                             key=lambda prob: prob.probability,
                             reverse=True)

        return predictions
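A minimal usage sketch for the Customvision class above, with placeholder keys, names, and Blob URL (in the original these come from config.py):

# Placeholder values for illustration only.
cv = Customvision(
    training_key="<training key>",
    prediction_key="<prediction key>",
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    project_name="my-project",
    publish_iteration_name="Iteration1",
)
for prediction in cv.get_prediction("https://<account>.blob.core.windows.net/images/sample.jpg"):
    print(prediction.tag_name, prediction.probability)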
Example No. 18
import serial

#Defining keys, ids and endpoint to access azure
endpoint = "https://eastus.api.cognitive.microsoft.com/"
training_key = "<Training Key>"
prediction_key = "<Prediction Key>"
resource_id = "/subscriptions/557ea86a-5b35-49e6-8744-907bcc129f41/resourceGroups/HandClassifier/providers/Microsoft.CognitiveServices/accounts/HTN-Resource"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
train = CustomVisionTrainingClient(endpoint, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predict = CustomVisionPredictionClient(endpoint, prediction_credentials)

#Gets the projects available
projects = train.get_projects()

#Finds the project used for HTN - Under projects
for p in projects:
    if p.name == "HandPredictionModel":
        project = p

#Gets the iterations of the project
iterations = train.get_iterations("<Project-ID>")

#Opens webcam
vidFeed = cv2.VideoCapture(0)

#Loop that occurs while the webcam is open
while (vidFeed.isOpened()):
    #Defining keypressed (waits and listens for key presses)
Example No. 19
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
ENDPOINT = "https://packimagerecogniti-prediction.cognitiveservices.azure.com/"

trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Now there is a trained endpoint that can be used to make a prediction
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

base_image_url = "C:\\Users\\blessonj\\Python\\cognitive-services-python-sdk-samples\\samples\\vision\\"

## Get the project id from the list of projects
## Isolate the one that is doing classification

projects = trainer.get_projects()

for project in projects:
    
    if project.name == project_name:
        #print(project.id)
        project_id = project.id
        print(project_id)



with open(base_image_url + "images\\Test\\Test_Joshua.jpg", "rb") as image_contents:
    results = predictor.classify_image(
        project_id, publish_iteration_name, image_contents.read())

    # Display the results.
Example No. 20
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
    ssl._create_default_https_context = ssl._create_unverified_context


ENDPOINT = "https://canetoadmachinelearning.cognitiveservices.azure.com/"

training_key = "cde7deba2d5d4df5b768b50b700c46b7"
prediction_key = "fb49a542a16a47e6b68b2983db158c32"
prediction_resource_id = "/subscriptions/baa59b08-5ec4-44ea-a907-b12782d8e2a0/resourceGroups/Canetoads/providers/Microsoft.CognitiveServices/accounts/CaneToadMachineLea-Prediction"


credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# finding project id
for project in trainer.get_projects():
    if project.name == 'Cane Toad Classifier Python':
        break
publish_iteration_name = "AllImages"

# Now there is a trained endpoint that can be used to make a prediction
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)




def predictFromImageUrl(testing_image_urls, file_name):
    '''
    Takes a list of image urls, and saves a csv and html file giving the probability of each photo being a cane toad
    :param testing_image_urls: a list of image urls, where each element in the list is a list of two elements; the url
Example No. 21
# Custom Vision modules
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from msrest.authentication import ApiKeyCredentials

from cv_00_credentials import ENDPOINT
from cv_00_credentials import training_key
from cv_00_credentials import prediction_resource_id

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# This is just a demo; we use the first project in the Custom Vision resource
project = trainer.get_projects()[0]
print('Project: ' + project.name)

# This is just a demo; we use the first iteration in the Custom Vision resource
iteration = trainer.get_iterations(project.id)[0]

print('Iteration: ' + iteration.name)

published = trainer.publish_iteration(project.id, iteration.id, iteration.name,
                                      prediction_resource_id)
Example No. 22
trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

dest_trainer = CustomVisionTrainingClient(dest_training_key, endpoint=ENDPOINT)

# Find the image classification domain
classification_domain = next(domain for domain in trainer.get_domains() if domain.type == "Classification")
dest_classification_domain = next(domain for domain in dest_trainer.get_domains() if domain.type == "Classification")


# ## Get the origin project ID reference

# In[5]:


myProjects = trainer.get_projects()


# In[6]:


for project in myProjects:
    print(project.name)
    print(project.id)
    print(project.description)


# In[7]:


Project = trainer.get_project(project_id=project_id)
Example No. 23
import os
import json

home = os.path.expanduser('~')
vision_auth_file = 'Downloads/vision-auth.json'
ptr = open(os.path.join(home, vision_auth_file), 'r')
vision_auth = json.loads(ptr.read())
ptr.close()

training_key = vision_auth['training_key']
ENDPOINT = vision_auth['ENDPOINT']

trainAPI = CustomVisionTrainingClient(training_key, ENDPOINT)

domains = trainAPI.get_domains()
project = trainAPI.get_projects()[0]

modelPath = os.getcwd() + '/model'
dataPath = os.path.join(modelPath, 'dataset.json')
imagesPath = os.path.join(modelPath, 'images')
print(modelPath, dataPath, imagesPath)

file = open(dataPath)
images = json.loads(file.read())
file.close()

print(project)
existingTags = trainAPI.get_tags(project_id=project.id)

# for tag in existingTags:
#     trainAPI.delete_tag(project.id , tag.id)