Example No. 1
import json
import time

from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch
from msrest.authentication import ApiKeyCredentials


def main():
    """
    Training for object detection with Azure Custom Vision
    """
    args = parse_args()
    with open(args.config, "r") as f:
        config = json.load(f)
    credentials = ApiKeyCredentials(in_headers={"Training-key": config["training_key"]})
    trainer = CustomVisionTrainingClient(config["ENDPOINT"], credentials)

    print("Creating project...")

    # Find the object detection domain
    obj_detection_domain = next(
        domain
        for domain in trainer.get_domains()
        if domain.type == "ObjectDetection" and domain.name == "General"
    )
    project = trainer.create_project(
        config["project_name"], domain_id=obj_detection_domain.id
    )

    # ======================================================================================

    print("Adding images...")
    image_folder = config["image_folder"]
    with open("annotation.json", "r") as f:
        annotations = json.load(f)
    tagged_images_with_regions = []
    for label in annotations.keys():
        tagged_images_with_regions += add_image(
            trainer, label, project.id, annotations[label], image_folder
        )

    upload_result = trainer.create_images_from_files(
        project.id, ImageFileCreateBatch(images=tagged_images_with_regions)
    )
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)

    # ======================================================================================
    print("Training...")
    publish_iteration_name = config["publish_iteration_name"]
    prediction_resource_id = config["prediction_resource_id"]
    iteration = trainer.train_project(project.id)
    while iteration.status != "Completed":
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Publish it to the project endpoint
    trainer.publish_iteration(
        project.id, iteration.id, publish_iteration_name, prediction_resource_id
    )
    print("Done!")
Example No. 2
def upload_images(training_key):
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection")

    print("Creating project...")
    try:
        project = trainer.create_project("LEGO Vision", domain_id=obj_detection_domain.id)
    except HttpOperationError:
        print("Project already exists. Using this one.")
        project = trainer.get_project(project_id="71548120-925d-4e59-ba7e-32f99de50240")

    classes = os.path.join(BASE_DIRECTORY, "class_map.txt")
    tags = dict()
    # Create one tag per class listed in class_map.txt
    for _class in [line.split('\t')[0] for line in open(classes)]:
        try:
            tags[_class] = trainer.create_tag(project.id, _class)
        except HttpOperationError:
            print("Tag already created, continuing...")
            for tag in trainer.get_tags(project_id="71548120-925d-4e59-ba7e-32f99de50240"):
                tags[tag.name] = tag

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for image_path in glob.glob(os.path.join(IMAGES_FOLDER, "*.jpg")):
        # Use the file stem so the matching .bboxes/.labels annotation files resolve correctly
        file_id = os.path.splitext(os.path.basename(image_path))[0]
        image = cv2.imread(image_path)
        bboxes = read_bboxes(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.tsv"), scale=1, padding=(0, 0, 0, 0))
        labels = read_labels(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.labels.tsv"))
        regions = [Region(tag_id=tags[_class].id, left=bbox[0] / image.shape[1], top=bbox[1] / image.shape[0],
                          width=abs(bbox[0] - bbox[2]) / image.shape[1], height=abs(bbox[1] - bbox[3]) / image.shape[0])
                   for _class, bbox in zip(labels, bboxes)]
        with open(image_path, mode="rb") as image_contents:
            tagged_images_with_regions.append(ImageFileCreateEntry(name=file_id, contents=image_contents.read(),
                                                                   regions=regions))
    print("Azure Custom Vision can only accept images in batches of max 64 per batch. Cutting list up in batches..")
    for batch in chunks(tagged_images_with_regions, 64):
        trainer.create_images_from_files(project.id, images=batch, tag_ids=[tag.id for tag in tags.values()])
    print("Finished adding images. Visit customvision.ai to start training via the GUI.")
Example No. 3
ENDPOINT = "<your API endpoint>"
training_key = "<your training key>"
prediction_key = "<your prediction key>"
prediction_resource_id = "<your prediction resource id>"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

publish_iteration_name = "detectModel"

# Find the object detection domain
obj_detection_domain = next(
    domain for domain in trainer.get_domains()
    if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print("Creating project...")
project = trainer.create_project("My Detection Project",
                                 domain_id=obj_detection_domain.id)

# Make two tags in the new project
fork_tag = trainer.create_tag(project.id, "fork")
scissors_tag = trainer.create_tag(project.id, "scissors")

fork_image_regions = {
    "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
    "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
    "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
Example No. 4


ENDPOINT = data['endpoint']
training_key = data['training_key']
prediction_key = data['prediction_key']
prediction_resource_id = data['prediction_resource_id']

publish_iteration_name = "detectModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)


# Find the object detection domain
obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print ("Creating project...")
project = trainer.create_project(data["project_name"] ,domain_id=obj_detection_domain.id)
vehicle_tag = trainer.create_tag(project.id, data["tag"])
print("New project created")








    


ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"

# Replace with a valid key
training_key = "f13a5db8bb5c4188a7c2ee7bab1f4b9b"
prediction_key = "b48ac6ad4b424b4bb914abc4ee6c8802"
project_id="dbfed461-14c1-4615-b17a-d05db9230f54"
iteration_id="b1fa8ec6-355d-4677-97a7-90aa9eaac4ef"


trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Find the object detection domain
obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection")


# Now there is a trained endpoint that can be used to make a prediction

predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

cap = cv2.VideoCapture(0)
###################################
points=[]
point1=(300,300)
point2=(900,300)
point3=(300,600)
point4=(900,600)
###################################
points.append(point1)
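
The example is cut off here. A minimal sketch of how the capture loop might continue, assuming the legacy predict_image(project_id, image_data, iteration_id=...) call that matches the older client constructor used above, and a 0.5 probability threshold (both assumptions):

# Score every webcam frame against the trained iteration (no throttling, for brevity).
while True:
    ret, frame = cap.read()
    if not ret:
        break
    ok, encoded = cv2.imencode(".jpg", frame)
    if not ok:
        continue
    results = predictor.predict_image(project_id, encoded.tobytes(), iteration_id=iteration_id)
    for prediction in results.predictions:
        if prediction.probability < 0.5:
            continue
        box = prediction.bounding_box  # normalized left/top/width/height
        h, w = frame.shape[:2]
        top_left = (int(box.left * w), int(box.top * h))
        bottom_right = (int((box.left + box.width) * w), int((box.top + box.height) * h))
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
    cv2.imshow("detections", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()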
Example No. 6
# Microsoft Custom Vision - Python Example
# Reference: https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/python-tutorial-od
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region

# note: needs example update from official website

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Find the object detection domain
obj_detection_domain = next(domain for domain in trainer.get_domains()
                            if domain.type == "ObjectDetection")

# Create a new project
print("Creating project...")
project = trainer.create_project("Example Object Detection",
                                 domain_id=obj_detection_domain.id)

# Create image tags
fork_tag = trainer.create_tag(project.id, "fork")
scissors_tag = trainer.create_tag(project.id, "scissors")

fork_image_regions = {
    "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
    "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
    "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
    "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
    "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
    "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
    "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
    "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
Example No. 7
def train_project(training_key):
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains()
                                if domain.type == "ObjectDetection")

    # Create a new project
    print("Creating project...")
    project = trainer.create_project("My Detection Project",
                                     domain_id=obj_detection_domain.id)

    # Make two tags in the new project
    fork_tag = trainer.create_tag(project.id, "fork")
    scissors_tag = trainer.create_tag(project.id, "scissors")

    fork_image_regions = {
        "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
        "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
        "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
        "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
        "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
        "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
        "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
        "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
        "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
        "fork_10": [0.05269608, 0.282303959, 0.8088235, 0.452614367],
        "fork_11": [0.05759804, 0.0894935, 0.9007353, 0.3251634],
        "fork_12": [0.3345588, 0.07315363, 0.375, 0.9150327],
        "fork_13": [0.269607842, 0.194068655, 0.4093137, 0.6732026],
        "fork_14": [0.143382356, 0.218578458, 0.7977941, 0.295751631],
        "fork_15": [0.19240196, 0.0633497, 0.5710784, 0.8398692],
        "fork_16": [0.140931368, 0.480016381, 0.6838235, 0.240196079],
        "fork_17": [0.305147052, 0.2512582, 0.4791667, 0.5408496],
        "fork_18": [0.234068632, 0.445702642, 0.6127451, 0.344771236],
        "fork_19": [0.219362751, 0.141781077, 0.5919118, 0.6683006],
        "fork_20": [0.180147052, 0.239820287, 0.6887255, 0.235294119]
    }

    scissors_image_regions = {
        "scissors_1": [0.4007353, 0.194068655, 0.259803921, 0.6617647],
        "scissors_2": [0.426470578, 0.185898721, 0.172794119, 0.5539216],
        "scissors_3": [0.289215684, 0.259428144, 0.403186262, 0.421568632],
        "scissors_4": [0.343137264, 0.105833367, 0.332107842, 0.8055556],
        "scissors_5": [0.3125, 0.09766343, 0.435049027, 0.71405226],
        "scissors_6": [0.379901975, 0.24308826, 0.32107842, 0.5718954],
        "scissors_7": [0.341911763, 0.20714055, 0.3137255, 0.6356209],
        "scissors_8": [0.231617644, 0.08459154, 0.504901946, 0.8480392],
        "scissors_9": [0.170343131, 0.332957536, 0.767156839, 0.403594762],
        "scissors_10": [0.204656869, 0.120539248, 0.5245098, 0.743464053],
        "scissors_11": [0.05514706, 0.159754932, 0.799019635, 0.730392158],
        "scissors_12": [0.265931368, 0.169558853, 0.5061275, 0.606209159],
        "scissors_13": [0.241421565, 0.184264734, 0.448529422, 0.6830065],
        "scissors_14": [0.05759804, 0.05027781, 0.75, 0.882352948],
        "scissors_15": [0.191176474, 0.169558853, 0.6936275, 0.6748366],
        "scissors_16": [0.1004902, 0.279036, 0.6911765, 0.477124184],
        "scissors_17": [0.2720588, 0.131977156, 0.4987745, 0.6911765],
        "scissors_18": [0.180147052, 0.112369314, 0.6262255, 0.6666667],
        "scissors_19": [0.333333343, 0.0274019931, 0.443627447, 0.852941155],
        "scissors_20": [0.158088237, 0.04047389, 0.6691176, 0.843137264]
    }

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for file_name in fork_image_regions.keys():
        x, y, w, h = fork_image_regions[file_name]
        regions = [
            Region(tag_id=fork_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "fork", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    for file_name in scissors_image_regions.keys():
        x, y, w, h = scissors_image_regions[file_name]
        regions = [
            Region(tag_id=scissors_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "scissors", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    trainer.create_images_from_files(project.id,
                                     images=tagged_images_with_regions)

    print("Training...")
    iteration = trainer.train_project(project.id)
    while iteration.status != "Completed":
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    print("Done!")
    return project, iteration
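
A quick prediction check typically follows training; a minimal sketch in the same legacy SDK style, assuming a prediction_key and a local test image path (both are assumptions):

from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient


def predict_project(prediction_key, project, iteration):
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

    # Hypothetical test image path.
    with open(os.path.join(IMAGES_FOLDER, "test", "test_od_image.jpg"), mode="rb") as test_data:
        results = predictor.predict_image(project.id, test_data.read(), iteration.id)

    # Each prediction carries a normalized bounding box (left, top, width, height).
    for prediction in results.predictions:
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100),
              prediction.bounding_box.left, prediction.bounding_box.top,
              prediction.bounding_box.width, prediction.bounding_box.height)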
Example No. 8
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region

ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"

# Replace with a valid key
training_key = "<your training key>"
#prediction_key = "<your prediction key>"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Select the object detection (compact) domain
domains = trainer.get_domains()
print("Domain types:", [d.type for d in domains])
obj_detection_compact_domain = next(
    domain for domain in domains
    if domain.type == "ObjectDetection" and "compact" in domain.name.lower())

# Create a new project
print("Creating project...")
project = trainer.create_project("My Detection Project", domain_id=obj_detection_compact_domain.id)

# Make two tags in the new project
all_tags = [trainer.create_tag(project.id, "Apple"),
            trainer.create_tag(project.id, "Banana")
            ]

tag_name_to_id = {}
for t in all_tags:
    tag_name_to_id[t.name] = t.id

# each element is (image file name, list of regions)
# each region is a tuple where the first element is the tag and the remaining are x, y, w, h.
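
The comment above describes the planned data layout, but the example stops before showing it. A minimal sketch of that structure and how it could be turned into tagged uploads (the file names and coordinates below are made up):

# (image file name, list of regions); each region is (tag, x, y, w, h) normalized to [0, 1].
training_data = [
    ("apple_01.jpg", [("Apple", 0.10, 0.15, 0.30, 0.40)]),
    ("banana_01.jpg", [("Banana", 0.25, 0.20, 0.35, 0.50)]),
]

tagged_images_with_regions = []
for file_name, region_tuples in training_data:
    regions = [Region(tag_id=tag_name_to_id[tag], left=x, top=y, width=w, height=h)
               for tag, x, y, w, h in region_tuples]
    with open(file_name, mode="rb") as image_contents:
        tagged_images_with_regions.append(
            ImageFileCreateEntry(name=file_name, contents=image_contents.read(), regions=regions))

trainer.create_images_from_files(project.id, images=tagged_images_with_regions)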
Example No. 9
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region
import os
import json

home = os.path.expanduser('~')
vision_auth_file = 'Downloads/vision-auth.json'
with open(os.path.join(home, vision_auth_file), 'r') as ptr:
    vision_auth = json.load(ptr)

training_key = vision_auth['training_key']
ENDPOINT = vision_auth['ENDPOINT']

trainAPI = CustomVisionTrainingClient(training_key, ENDPOINT)

domains = trainAPI.get_domains()
project = trainAPI.get_projects()[0]

modelPath = os.getcwd() + '/model'
dataPath = os.path.join(modelPath, 'dataset.json')
imagesPath = os.path.join(modelPath, 'images')
print(modelPath, dataPath, imagesPath)

with open(dataPath) as file:
    images = json.load(file)

print(project)
existingTags = trainAPI.get_tags(project_id=project.id)

# for tag in existingTags:
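
The snippet stops at the commented loop. A minimal sketch of how the existing tags might be reused, assuming tags should be looked up by name and created on demand:

tag_by_name = {tag.name: tag for tag in existingTags}


def get_or_create_tag(name):
    # Reuse an existing tag if the project already has one with this name.
    if name not in tag_by_name:
        tag_by_name[name] = trainAPI.create_tag(project.id, name)
    return tag_by_name[name]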
Example No. 10
class UploadDataset:
    def __init__(self, files_to_upload: list, project_name: str) -> None:
        self.files_to_upload = files_to_upload

        credentials = ApiKeyCredentials(
            in_headers={"Training-key": TRAINING_KEY})
        self.trainer = CustomVisionTrainingClient(TRAINING_ENDPOINT,
                                                  credentials)

        self.project_name = project_name

        self.max_byte_size = 4000000

        self.project: Project = self._connect_to_or_create_project(
            project_name=self.project_name)
        # Make two tags in the new project
        self.green_car_seal_tag = self._get_or_create_tag("green_car_seal")
        self.red_car_seal_tag = self._get_or_create_tag("red_car_seal")
        self.label_to_tag_id = {
            0: self.red_car_seal_tag.id,
            1: self.green_car_seal_tag.id,
        }

    def _connect_to_or_create_project(self, project_name: str) -> Project:
        projects = self.trainer.get_projects()
        project_id = next((p.id for p in projects if p.name == project_name),
                          None)

        if project_id is not None:
            print("Connecting to existing project...")
            return self.trainer.get_project(project_id)

        print("Creating new project...")
        obj_detection_domain = next(
            domain for domain in self.trainer.get_domains()
            if domain.type == "ObjectDetection" and domain.name == "General")
        return self.trainer.create_project(project_name,
                                           domain_id=obj_detection_domain.id)

    def _get_or_create_tag(self, tag_name) -> Tag:
        tags = self.trainer.get_tags(self.project.id)
        for tag in tags:
            if tag.name == tag_name:
                return self.trainer.get_tag(self.project.id, tag.id)

        return self.trainer.create_tag(self.project.id, tag_name)

    def _read_annotation_file(self, annotation_path: str) -> list:
        annotations = []
        with open(annotation_path, "r") as f:

            for line in f:
                line = line.strip()
                parameter_list = line.split(" ")
                label = int(parameter_list[0])
                x, y, w, h = list(map(float, parameter_list[1:]))

                left = x - w / 2
                if left < 0:  # Accounting for previous rounding error
                    left = 0
                top = y - h / 2
                if top < 0:  # Accounting for previous rounding error
                    top = 0

                if left + w > 1:  # Accounting for previous rounding error
                    w = 1 - left
                if top + h > 1:  # Accounting for previous rounding error
                    h = 1 - top

                try:
                    tag_id = self.label_to_tag_id[label]
                except KeyError:
                    raise ValueError(
                        f"Wrong label {label} at {annotation_path}")

                annotations.append(
                    Region(
                        tag_id=tag_id,
                        left=left,
                        top=top,
                        width=w,
                        height=h,
                    ))
        return annotations

    def main(self) -> None:
        dataset_path = os.path.join(os.path.dirname(__file__),
                                    "../../../dataset")

        existing_image_count = self.trainer.get_image_count(
            project_id=self.project.id)
        file_number = existing_image_count
        self.files_to_upload = self.files_to_upload[file_number:]

        for file_name in self.files_to_upload:
            tagged_images_with_regions = []

            annotations: list = self._read_annotation_file(
                annotation_path=os.path.join(dataset_path, "annotations",
                                             file_name + ".txt"), )
            image_bytes: bytes = read_and_resize_image(
                image_path=os.path.join(dataset_path, "images",
                                        file_name + ".JPG"),
                max_byte_size=self.max_byte_size,
            )
            print(f"Image {file_name} is {len(image_bytes)} bytes")
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_bytes,
                                     regions=annotations))
            print("Upload images...")
            upload_result = self.trainer.create_images_from_files(
                self.project.id,
                ImageFileCreateBatch(images=tagged_images_with_regions))
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)
                exit(-1)
            print(
                f"Uploaded file number {file_number+1} of {len(self.files_to_upload)}"
            )
            file_number += 1
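
The class above depends on a read_and_resize_image helper and on module-level TRAINING_KEY / TRAINING_ENDPOINT constants that are not shown. A minimal sketch of the helper (assuming Pillow and re-encoding as JPEG at decreasing quality) plus a hypothetical entry point:

import glob
import os
from io import BytesIO

from PIL import Image


def read_and_resize_image(image_path: str, max_byte_size: int) -> bytes:
    # Re-encode as JPEG, lowering quality until the payload fits the size limit.
    image = Image.open(image_path).convert("RGB")
    data = b""
    for quality in range(95, 15, -10):
        buffer = BytesIO()
        image.save(buffer, format="JPEG", quality=quality)
        data = buffer.getvalue()
        if len(data) <= max_byte_size:
            break
    return data


if __name__ == "__main__":
    # Hypothetical file list: one stem per image/annotation pair in the dataset folder.
    file_names = sorted(os.path.splitext(os.path.basename(path))[0]
                        for path in glob.glob("../../../dataset/images/*.JPG"))
    UploadDataset(files_to_upload=file_names, project_name="car-seal-detection").main()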
Example No. 11
# 1. Go to https://customvision.ai/ and sign in with your Azure account/ID
# 2. Once you are in, go to Settings (gear icon - upper right corner) and copy the prediction_resource_id
prediction_resource_id ='/subscriptions/<>/resourceGroups/<enter your rg name>/providers/Microsoft.CognitiveServices/accounts/<enter your rg name>_prediction'

# ORIGIN Resource Group Keys **************************************************
training_key = "<enter source training key>"

project_id="<enter source project id>" 

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# DESTINATION Resource Group Keys *********************************************
dest_training_key = "<enter destination training key>"

dest_trainer = CustomVisionTrainingClient(dest_training_key, endpoint=ENDPOINT)

# Find the image classification domain
classification_domain = next(domain for domain in trainer.get_domains() if domain.type == "Classification")
dest_classification_domain = next(domain for domain in dest_trainer.get_domains() if domain.type == "Classification")


# ## Get the origin project ID reference
myProjects = trainer.get_projects()

for project in myProjects:
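    # Hypothetical continuation (the original snippet is truncated here): pick the
    # origin project by its ID, recreate it in the destination resource, and copy its tags.
    if project.id != project_id:
        continue
    dest_project = dest_trainer.create_project(project.name,
                                               domain_id=dest_classification_domain.id)
    for tag in trainer.get_tags(project.id):
        dest_trainer.create_tag(dest_project.id, tag.name)
    print("Created destination project '{}' and copied its tags".format(project.name))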