Example #1
def train_project(subscription_key):

    trainer = CustomVisionTrainingClient(subscription_key, endpoint=ENDPOINT)

    # Create a new project
    print ("Creating project...")
    project = trainer.create_project(SAMPLE_PROJECT_NAME)

    # Make two tags in the new project
    hemlock_tag = trainer.create_tag(project.id, "Hemlock")
    cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")

    print ("Adding images...")
    hemlock_dir = os.path.join(IMAGES_FOLDER, "Hemlock")
    for image in os.listdir(hemlock_dir):
        with open(os.path.join(hemlock_dir, image), mode="rb") as img_data: 
            trainer.create_images_from_data(project.id, img_data.read(), [ hemlock_tag.id ])
    
    cherry_dir = os.path.join(IMAGES_FOLDER, "Japanese Cherry")
    for image in os.listdir(cherry_dir):
        with open(os.path.join(cherry_dir, image), mode="rb") as img_data: 
            trainer.create_images_from_data(project.id, img_data.read(), [ cherry_tag.id ])

    print ("Training...")
    iteration = trainer.train_project(project.id)
    while (iteration.status == "Training"):
        iteration = trainer.get_iteration(project.id, iteration.id)
        print ("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    print ("Done!")
    return project
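
A hypothetical driver for the function above; the environment variable name is a placeholder, not part of the original sample:

if __name__ == "__main__":
    # CUSTOM_VISION_TRAINING_KEY is an assumed name for wherever the key is stored.
    train_project(os.environ["CUSTOM_VISION_TRAINING_KEY"])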
Example #2
def main():
    """
    Training for object detection with Azure Custom Vision
    """
    args = parse_args()
    with open(args.config, "r") as config_file:
        config = json.load(config_file)
    credentials = ApiKeyCredentials(in_headers={"Training-key": config["training_key"]})
    trainer = CustomVisionTrainingClient(config["ENDPOINT"], credentials)

    print("Creating project...")

    # Find the object detection domain
    obj_detection_domain = next(
        domain
        for domain in trainer.get_domains()
        if domain.type == "ObjectDetection" and domain.name == "General"
    )
    project = trainer.create_project(
        config["project_name"], domain_id=obj_detection_domain.id
    )

    # ======================================================================================

    print("Adding images...")
    image_folder = config["image_folder"]
    with open("annotation.json", "r") as annotation_file:
        annotations = json.load(annotation_file)
    tagged_images_with_regions = []
    for label in annotations.keys():
        tagged_images_with_regions += add_image(
            trainer, label, project.id, annotations[label], image_folder
        )

    upload_result = trainer.create_images_from_files(
        project.id, ImageFileCreateBatch(images=tagged_images_with_regions)
    )
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)

    # ======================================================================================
    print("Training...")
    publish_iteration_name = config["publish_iteration_name"]
    prediction_resource_id = config["prediction_resource_id"]
    iteration = trainer.train_project(project.id)
    while iteration.status != "Completed":
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Publish it to the project endpoint
    trainer.publish_iteration(
        project.id, iteration.id, publish_iteration_name, prediction_resource_id
    )
    print("Done!")
Example #3
def train_project(subscription_key):
    try:
        prediction_resource_id = os.environ[
            PREDICTION_RESOURCE_ID_KEY_ENV_NAME]
    except KeyError:
        raise PredictionResourceMissingError(
            "Didn't find a prediction resource to publish to. Please set the {} environment variable"
            .format(PREDICTION_RESOURCE_ID_KEY_ENV_NAME))

    trainer = CustomVisionTrainingClient(subscription_key, endpoint=ENDPOINT)

    # Create a new project
    print("Creating project...")
    project = trainer.create_project(SAMPLE_PROJECT_NAME,
                                     classification_type=Classifier.multiclass)

    # Make tags in the new project
    hemlock_tag = trainer.create_tag(project.id, "Hemlock")
    cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")
    pine_needle_tag = trainer.create_tag(project.id, "Pine Needle Leaves")
    flat_leaf_tag = trainer.create_tag(project.id, "Flat Leaves")

    print("Adding images...")
    hemlock_dir = os.path.join(IMAGES_FOLDER, "Hemlock")
    for image in os.listdir(hemlock_dir):
        with open(os.path.join(hemlock_dir, image), mode="rb") as img_data:
            trainer.create_images_from_data(
                project.id, img_data.read(),
                [hemlock_tag.id, pine_needle_tag.id])

    cherry_dir = os.path.join(IMAGES_FOLDER, "Japanese Cherry")
    for image in os.listdir(cherry_dir):
        with open(os.path.join(cherry_dir, image), mode="rb") as img_data:
            trainer.create_images_from_data(project.id, img_data.read(),
                                            [cherry_tag.id, flat_leaf_tag.id])

    print("Training...")
    iteration = trainer.train_project(project.id)
    while (iteration.status == "Training"):
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Name and publish this iteration to a prediction endpoint
    trainer.publish_iteration(project.id, iteration.id, PUBLISH_ITERATION_NAME,
                              prediction_resource_id)
    print("Done!")

    return project
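
This function relies on several module-level constants that are not shown in the snippet. Placeholder definitions (the values below are illustrative assumptions only) might look like:

ENDPOINT = "https://<your-region>.api.cognitive.microsoft.com"
SAMPLE_PROJECT_NAME = "Sample Project"
PUBLISH_ITERATION_NAME = "classifyModel"
PREDICTION_RESOURCE_ID_KEY_ENV_NAME = "CUSTOM_VISION_PREDICTION_RESOURCE_ID"
IMAGES_FOLDER = os.path.join(os.path.dirname(__file__), "images")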
Example #4
    def createModel(self, database, id):

        trainer = CustomVisionTrainingClient(self._trainingKey,
                                             endpoint=self._endPoint)
        project = trainer.create_project(MODEL_ID_PREFIX + "-" +
                                         str(time.time_ns()))

        # reserve the necessary resources for the new model and insert the record into the database
        session = database.cursor()
        session.execute(
            "INSERT INTO " + self._datatableName +
            " (id, remote_id) VALUES (%s, %s)", (id, project.id))
        database.commit()

        return True
def upload_images(training_key):
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection")

    print("Creating project...")
    try:
        project = trainer.create_project("LEGO Vision", domain_id=obj_detection_domain.id)
    except HttpOperationError:
        print("Project already exists. Using this one.")
        project = trainer.get_project(project_id="71548120-925d-4e59-ba7e-32f99de50240")

    classes = os.path.join(BASE_DIRECTORY, "class_map.txt")
    tags = dict()
    # Create a tag for each class listed in the class map
    with open(classes) as class_file:
        class_names = [line.split('\t')[0] for line in class_file]
    for _class in class_names:
        try:
            tags[_class] = trainer.create_tag(project.id, _class)
        except HttpOperationError:
            print("Tag already created, continuing...")
            for tag in trainer.get_tags(project_id="71548120-925d-4e59-ba7e-32f99de50240"):
                tags[tag.name] = tag

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for image_path in glob.glob(os.path.join(IMAGES_FOLDER, "*.jpg")):
        file_id, extension = image_path.split(".", 1)
        image = cv2.imread(image_path)
        bboxes = read_bboxes(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.tsv"), scale=1, padding=(0, 0, 0, 0))
        labels = read_labels(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.labels.tsv"))
        regions = [Region(tag_id=tags[_class].id, left=bbox[0] / image.shape[1], top=bbox[1] / image.shape[0],
                          width=abs(bbox[0] - bbox[2]) / image.shape[1], height=abs(bbox[1] - bbox[3]) / image.shape[0])
                   for _class, bbox in zip(labels,
                                           bboxes)]
        with open(image_path, mode="rb") as image_contents:
            tagged_images_with_regions.append(ImageFileCreateEntry(name=file_id, contents=image_contents.read(),
                                                                   regions=regions))
    print("Azure Custom Vision can only accept images in batches of max 64 per batch. Cutting list up in batches..")
    for batch in chunks(tagged_images_with_regions, 64):
        trainer.create_images_from_files(project.id, images=batch, tag_ids=[tag.id for tag in tags.values()])
    print("Finished adding images. Visit customvision.ai to start training via the GUI.")
def create_classifier_model(classifier_folder):
    trainer = CustomVisionTrainingClient(TRAINING_KEY, endpoint=ENDPOINT)

    # Create a new project
    project_name = "Seal ID Classifier-" + ITERATION + '-' + str(ACCURACY)
    print ("Creating project... " + project_name)
    project = trainer.create_project(project_name)

    tags = {}
    image_list = []

    for subdir, dirs, files in os.walk(classifier_folder):

        # Make tags for seals
        for subdirname in dirs:
            seal_name = subdirname
            print(seal_name)

            tags[seal_name] = trainer.create_tag(project.id, seal_name)

        for file in files:

            print("Adding images...")

            path_name = os.path.join(subdir, file)
            seal_folder_name = subdir.split('/')[3]
            with open(path_name, "rb") as image_contents:
                print(path_name)
                print(seal_folder_name)
                image_list.append(ImageFileCreateEntry(
                    name=file, contents=image_contents.read(), tag_ids=[tags[seal_folder_name].id]))

            # batch the requests
            if len(image_list) == 60:
                send_images(trainer, project, image_list)

                image_list = []

    # send the remaining images that didn't fill a 60-image batch
    send_images(trainer, project, image_list)
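
The `send_images` helper used above is also not shown. A minimal sketch, assuming the older client signature used in this snippet (images passed as a plain list), might be:

def send_images(trainer, project, image_list):
    # Hypothetical sketch of the batch upload used above.
    if not image_list:
        return
    upload_result = trainer.create_images_from_files(project.id, images=image_list)
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)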
Example #7
# Microsoft Custom Vision - Python Example
# Reference: https://docs.microsoft.com/en-us/azure/cognitive-services/custom-vision-service/python-tutorial-od
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region

# note: needs example update from official website

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Find the object detection domain
obj_detection_domain = next(domain for domain in trainer.get_domains()
                            if domain.type == "ObjectDetection")

# Create a new project
print("Creating project...")
project = trainer.create_project("Example Object Detection",
                                 domain_id=obj_detection_domain.id)

# Create image tags
fork_tag = trainer.create_tag(project.id, "fork")
scissors_tag = trainer.create_tag(project.id, "scissors")

fork_image_regions = {
    "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
    "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
    "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
    "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
    "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
    "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
    "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
    "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
    "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
Example #8
ENDPOINT = "https://educ463b.cognitiveservices.azure.com/"
training_key = "7483a7108873428ba1e7197a24fc0f71"
prediction_key = "62fcf010ccba49d58aebee8a37def6b9"
prediction_resource_id = "/subscriptions/95f478e7-5b73-439d-8536-16dc258609f6/resourceGroups/demo/providers/Microsoft.CognitiveServices/accounts/educ463b-Prediction"
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

publish_iteration_name = "classifyModel"

# Create a new project
print("Creating project...")
project_name = uuid.uuid4()
project = trainer.create_project(project_name)

# Make two tags in the new project
# seamus_tag = trainer.create_tag(project.id, "seamus")
# finley_tag = trainer.create_tag(project.id, "finley")

base_image_location = '/Users/nickhaber/Downloads/teachable_machine_demo'

print("Adding images...")

# Need to create a list of all ImageFileCreateEntry objects
# with images tagged according to the label.
# The control flow here reflects my directory structure.
images = {}
filenames = {}
labels = ['seamus', 'finley']
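
The snippet is cut off here. A minimal sketch of the per-label upload it describes, assuming one folder per label under base_image_location and that ImageFileCreateEntry is imported from the Custom Vision models, might look like:

for label in labels:
    tag = trainer.create_tag(project.id, label)
    label_dir = os.path.join(base_image_location, label)
    entries = []
    for file_name in os.listdir(label_dir):
        with open(os.path.join(label_dir, file_name), "rb") as image_contents:
            entries.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     tag_ids=[tag.id]))
    images[label] = entries
    filenames[label] = [entry.name for entry in entries]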
Example #9
# Replace with a valid key
training_key = "0c64291277c8425399d3873b8b2a35b0"
prediction_key = "21dcf849cf844333a0c1f45f6e5937aa"
prediction_resource_id = "/subscriptions/4199da6c-8f1e-4bfb-a8c1-62ba5bc76d73/resourceGroups/AzurePy/providers/Microsoft.CognitiveServices/accounts/TestCVP-Prediction"

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("Test")

withMask_tag = trainer.create_tag(project.id, "With Mask")
withoutMask_tag = trainer.create_tag(project.id, "Without Mask")

base_image_url = "./images/"

print("Adding images...")

image_list = []

for image_num in range(1, 6):
    file_name = "withmask_{}.jpg".format(image_num)
    with open(base_image_url + "withmask/" + file_name,
              "rb") as image_contents:
        image_list.append(
Example #10
ENDPOINT = "https://treasuretrain-prediction.cognitiveservices.azure.com/"

# Replace with a valid key
training_key = "f14a2b199e614fafbe96e23b17fd58b1"
prediction_key = "8d30ba3f687644c582153ad00394cd9c"
prediction_resource_id = "/subscriptions/b94ca22d-fa8d-4e70-9231-3224c99a1bcf/resourceGroups/treasure_ntub/providers/Microsoft.CognitiveServices/accounts/TreasureTrain-Prediction"

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("treasure-free")

# Make tags in the new project
Plastic_tag = trainer.create_tag(project.id, "Plastic")
PC_tag = trainer.create_tag(project.id, "PC")
IAC_tag = trainer.create_tag(project.id, "IAC")
Glass_tag = trainer.create_tag(project.id, "Glass")
GG_tag = trainer.create_tag(project.id, "GG")

base_image_url = "./treasure-free/"

print("Adding images...")

image_list = []

for image_num in range(1, 11):
def train_project(training_key):
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains()
                                if domain.type == "ObjectDetection")

    # Create a new project
    print("Creating project...")
    project = trainer.create_project("My Detection Project",
                                     domain_id=obj_detection_domain.id)

    # Make two tags in the new project
    fork_tag = trainer.create_tag(project.id, "fork")
    scissors_tag = trainer.create_tag(project.id, "scissors")

    fork_image_regions = {
        "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
        "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
        "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
        "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
        "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
        "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
        "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
        "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
        "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
        "fork_10": [0.05269608, 0.282303959, 0.8088235, 0.452614367],
        "fork_11": [0.05759804, 0.0894935, 0.9007353, 0.3251634],
        "fork_12": [0.3345588, 0.07315363, 0.375, 0.9150327],
        "fork_13": [0.269607842, 0.194068655, 0.4093137, 0.6732026],
        "fork_14": [0.143382356, 0.218578458, 0.7977941, 0.295751631],
        "fork_15": [0.19240196, 0.0633497, 0.5710784, 0.8398692],
        "fork_16": [0.140931368, 0.480016381, 0.6838235, 0.240196079],
        "fork_17": [0.305147052, 0.2512582, 0.4791667, 0.5408496],
        "fork_18": [0.234068632, 0.445702642, 0.6127451, 0.344771236],
        "fork_19": [0.219362751, 0.141781077, 0.5919118, 0.6683006],
        "fork_20": [0.180147052, 0.239820287, 0.6887255, 0.235294119]
    }

    scissors_image_regions = {
        "scissors_1": [0.4007353, 0.194068655, 0.259803921, 0.6617647],
        "scissors_2": [0.426470578, 0.185898721, 0.172794119, 0.5539216],
        "scissors_3": [0.289215684, 0.259428144, 0.403186262, 0.421568632],
        "scissors_4": [0.343137264, 0.105833367, 0.332107842, 0.8055556],
        "scissors_5": [0.3125, 0.09766343, 0.435049027, 0.71405226],
        "scissors_6": [0.379901975, 0.24308826, 0.32107842, 0.5718954],
        "scissors_7": [0.341911763, 0.20714055, 0.3137255, 0.6356209],
        "scissors_8": [0.231617644, 0.08459154, 0.504901946, 0.8480392],
        "scissors_9": [0.170343131, 0.332957536, 0.767156839, 0.403594762],
        "scissors_10": [0.204656869, 0.120539248, 0.5245098, 0.743464053],
        "scissors_11": [0.05514706, 0.159754932, 0.799019635, 0.730392158],
        "scissors_12": [0.265931368, 0.169558853, 0.5061275, 0.606209159],
        "scissors_13": [0.241421565, 0.184264734, 0.448529422, 0.6830065],
        "scissors_14": [0.05759804, 0.05027781, 0.75, 0.882352948],
        "scissors_15": [0.191176474, 0.169558853, 0.6936275, 0.6748366],
        "scissors_16": [0.1004902, 0.279036, 0.6911765, 0.477124184],
        "scissors_17": [0.2720588, 0.131977156, 0.4987745, 0.6911765],
        "scissors_18": [0.180147052, 0.112369314, 0.6262255, 0.6666667],
        "scissors_19": [0.333333343, 0.0274019931, 0.443627447, 0.852941155],
        "scissors_20": [0.158088237, 0.04047389, 0.6691176, 0.843137264]
    }

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for file_name in fork_image_regions.keys():
        x, y, w, h = fork_image_regions[file_name]
        regions = [
            Region(tag_id=fork_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "fork", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    for file_name in scissors_image_regions.keys():
        x, y, w, h = scissors_image_regions[file_name]
        regions = [
            Region(tag_id=scissors_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "scissors", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    trainer.create_images_from_files(project.id,
                                     images=tagged_images_with_regions)

    print("Training...")
    iteration = trainer.train_project(project.id)
    while (iteration.status != "Completed"):
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    print("Done!")
    return project, iteration
Example #12
import os

# You will need to sign in to CustomVision.ai with your account
# Go to settings (the gear in the top right corner)
# replace the ENDPOINT and training_key with the values from your custom vision service settings
# ENDPOINT is just the value of training endpoint with /customvision/v2.2/Training/ removed
# e.g. https://eastcentralus.api.cognitive.microsoft.com

ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
training_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Create a new project
print("Creating project...")
project = trainer.create_project("ElephantNoElephant")
print("Project created")

# Make two tags in the new project
print("Creating tags")
elephant_tag = trainer.create_tag(project.id, "Elephant")
giraffe_tag = trainer.create_tag(project.id, "Giraffe")

print("Adding images...")
# Add all images in Elephant folder to your project with the tag "elephant"
IMAGES_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "ElephantGiraffeTrainingImages")
elephant_dir = os.path.join(IMAGES_FOLDER, "Elephant")
for image in os.listdir(elephant_dir):
    with open(os.path.join(elephant_dir, image), mode="rb") as img_data:
        trainer.create_images_from_data(project.id, img_data.read(),
    os.environ['training_key'] = data['training_key']
    os.environ['prediction_resource_id'] = data['prediction_resource_id']

# All the keys will be put in a credentials file later
ENDPOINT = os.environ.get('resource_endpoint')
training_key = os.environ.get('training_key')
prediction_resource_id = os.environ.get('prediction_resource_id')

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("[INFO] Creating project")
project = trainer.create_project("COVID19-detector-based-on-CTscan-img")

positive_tag = trainer.create_tag(project.id, "Positive")
negative_tag = trainer.create_tag(project.id, "Negative")

print("[INFO] Uploading data")
image_list = []

for filename in os.listdir(args["covid"]):
    img_path = os.path.sep.join([args["covid"], filename])
    with open(img_path, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=filename,
                                 contents=image_contents.read(),
                                 tag_ids=[positive_tag.id]))
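
The snippet ends mid-upload. A possible continuation, where the argument name args["normal"] (the non-COVID image folder) and the ImageFileCreateBatch import are assumptions rather than part of the original code, might be:

for filename in os.listdir(args["normal"]):
    img_path = os.path.sep.join([args["normal"], filename])
    with open(img_path, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=filename,
                                 contents=image_contents.read(),
                                 tag_ids=[negative_tag.id]))

print("[INFO] Uploading batch")
upload_result = trainer.create_images_from_files(
    project.id, ImageFileCreateBatch(images=image_list))
if not upload_result.is_batch_successful:
    print("[INFO] Image batch upload failed")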
Example #14
training_key = data['training_key']
prediction_key = data['prediction_key']
prediction_resource_id = data['prediction_resource_id']

publish_iteration_name = "detectModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)


# Find the object detection domain
obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print ("Creating project...")
project = trainer.create_project(data["project_name"], domain_id=obj_detection_domain.id)
vehicle_tag = trainer.create_tag(project.id, data["tag"])
print("New project created")












credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
# </snippet_auth>

# <snippet_create>
publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("My New Project")
# </snippet_create>

# <snippet_tags>
# Make two tags in the new project
hemlock_tag = trainer.create_tag(project.id, "Hemlock")
cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")
# </snippet_tags>

# <snippet_upload>
base_image_location = "<path to repo directory>/cognitive-services-python-sdk-samples/samples/vision/"

print("Adding images...")

image_list = []
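
The <snippet_upload> block is truncated here. A minimal sketch of the upload step, assuming the Hemlock / Japanese Cherry sample images from the referenced samples repository and the ImageFileCreateEntry / ImageFileCreateBatch model imports, might look like:

for image_num in range(1, 11):
    file_name = "hemlock_{}.jpg".format(image_num)
    with open(base_image_location + "images/Hemlock/" + file_name, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 tag_ids=[hemlock_tag.id]))
for image_num in range(1, 11):
    file_name = "japanese_cherry_{}.jpg".format(image_num)
    with open(base_image_location + "images/Japanese Cherry/" + file_name, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 tag_ids=[cherry_tag.id]))

upload_result = trainer.create_images_from_files(
    project.id, ImageFileCreateBatch(images=image_list))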
Example #16
class UploadDataset:
    def __init__(self, files_to_upload: list, project_name: str) -> None:
        self.files_to_upload = files_to_upload

        credentials = ApiKeyCredentials(
            in_headers={"Training-key": TRAINING_KEY})
        self.trainer = CustomVisionTrainingClient(TRAINING_ENDPOINT,
                                                  credentials)

        self.project_name = project_name

        self.max_byte_size = 4000000

        self.project: Project = self._connect_to_or_create_project(
            project_name=self.project_name)
        # Make two tags in the new project
        self.green_car_seal_tag = self._get_or_create_tag("green_car_seal")
        self.red_car_seal_tag = self._get_or_create_tag("red_car_seal")
        self.label_to_tag_id = {
            0: self.red_car_seal_tag.id,
            1: self.green_car_seal_tag.id,
        }

    def _connect_to_or_create_project(self, project_name: str) -> Project:
        projects = self.trainer.get_projects()
        project_id = next((p.id for p in projects if p.name == project_name),
                          None)

        if project_id is not None:
            print("Connecting to existing project...")
            return self.trainer.get_project(project_id)

        print("Creating new project...")
        obj_detection_domain = next(
            domain for domain in self.trainer.get_domains()
            if domain.type == "ObjectDetection" and domain.name == "General")
        return self.trainer.create_project(project_name,
                                           domain_id=obj_detection_domain.id)

    def _get_or_create_tag(self, tag_name) -> Tag:
        tags = self.trainer.get_tags(self.project.id)
        for tag in tags:
            if tag.name == tag_name:
                return self.trainer.get_tag(self.project.id, tag.id)

        return self.trainer.create_tag(self.project.id, tag_name)

    def _read_annotation_file(self, annotation_path: str) -> list:
        annotations = []
        with open(annotation_path, "r") as f:

            for line in f:
                line = line.strip()
                parameter_list = line.split(" ")
                label = int(parameter_list[0])
                x, y, w, h = list(map(float, parameter_list[1:]))

                left = x - w / 2
                if left < 0:  # Accounting for previous rounding error
                    left = 0
                top = y - h / 2
                if top < 0:  # Accounting for previous rounding error
                    top = 0

                if left + w > 1:  # Accounting for previous rounding error
                    w = 1 - left
                if top + h > 1:  # Accounting for previous rounding error
                    h = 1 - top

                try:
                    tag_id = self.label_to_tag_id[label]
                except KeyError:
                    raise ValueError(
                        f"Wrong label {label} at {annotation_path}")

                annotations.append(
                    Region(
                        tag_id=tag_id,
                        left=left,
                        top=top,
                        width=w,
                        height=h,
                    ))
        return annotations

    def main(self) -> None:
        dataset_path = os.path.join(os.path.dirname(__file__),
                                    "../../../dataset")

        existing_image_count = self.trainer.get_image_count(
            project_id=self.project.id)
        file_number = existing_image_count
        self.files_to_upload = self.files_to_upload[file_number:]

        for file_name in self.files_to_upload:
            tagged_images_with_regions = []

            annotations: list = self._read_annotation_file(
                annotation_path=os.path.join(dataset_path, "annotations",
                                             file_name + ".txt"), )
            image_bytes: bytes = read_and_resize_image(
                image_path=os.path.join(dataset_path, "images",
                                        file_name + ".JPG"),
                max_byte_size=self.max_byte_size,
            )
            print(f"Image {file_name} is {len(image_bytes)} bytes")
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_bytes,
                                     regions=annotations))
            print("Upload images...")
            upload_result = self.trainer.create_images_from_files(
                self.project.id,
                ImageFileCreateBatch(images=tagged_images_with_regions))
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)
                exit(-1)
            print(
                f"Uploaded file number {file_number+1} of {len(self.files_to_upload)}"
            )
            file_number += 1
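
A hypothetical way to invoke the class above; the file names and project name are placeholders, not values from the original code:

if __name__ == "__main__":
    uploader = UploadDataset(files_to_upload=["IMG_0001", "IMG_0002"],
                             project_name="car-seal-detector")
    uploader.main()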
Example #17
training_key = "95b0a4720b9041f697a0b52d2e35cc33"
prediction_key = "cf621366042345548877b090e66aeff2"
prediction_resource_id = "/subscriptions/5d4d15d0-c922-41b5-81c3-f466c9a634bc/resourceGroups/IAProjects/providers/Microsoft.CognitiveServices/accounts/IAProjects_prediction"

publish_iteration_name = "detectModel"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Find the object detection domain
obj_detection_domain = next(
    domain for domain in trainer.get_domains()
    if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print("Creating project...")
project = trainer.create_project("IA-Apple-Orange",
                                 domain_id=obj_detection_domain.id)

# Make two tags in the new project
fork_tag = trainer.create_tag(project.id, "maça")
scissors_tag = trainer.create_tag(project.id, "laranja")

fork_image_regions = {
    "maca_1": [0.55, 0.46, 0.472, 0.505],
    #    "maca_2": [ 0.294117659, 0.216944471, 0.534313738, 0.5980392 ],
    #    "maca_3": [ 0.09191177, 0.0682516545, 0.757352948, 0.6143791 ],
    #    "maca_4": [ 0.254901975, 0.185898721, 0.5232843, 0.594771266 ],
    #    "maca_5": [ 0.2365196, 0.128709182, 0.5845588, 0.71405226 ],
    #    "maca_6": [ 0.115196079, 0.133611143, 0.676470637, 0.6993464 ],
    #    "maca_7": [ 0.164215669, 0.31008172, 0.767156839, 0.410130739 ],
    #    "maca_8": [ 0.118872553, 0.318251669, 0.817401946, 0.225490168 ],
    #    "maca_9": [ 0.18259804, 0.2136765, 0.6335784, 0.643790841 ],
# Custom Vision modules
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry
from msrest.authentication import ApiKeyCredentials

from cv_00_credentials import ENDPOINT
from cv_00_credentials import training_key

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("Garden Birds")

# Make tags in the new project
# Set the directory you want to start from
image_folder = os.getenv("HOME") + '/birds'
os.chdir(image_folder)
# Tag = Directory name
tags = [name for name in os.listdir('.') if os.path.isdir(name)]
print(tags)


def createTag(tag):
    result = trainer.create_tag(project.id, tag)
    print(f'{tag} created with id: {result.id}')
    return result.id
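
A possible continuation of the bird-classifier setup above, assuming each tag directory directly contains that tag's image files:

image_list = []
for tag_name in tags:
    tag_id = createTag(tag_name)
    tag_dir = os.path.join(image_folder, tag_name)
    for file_name in os.listdir(tag_dir):
        with open(os.path.join(tag_dir, file_name), "rb") as image_contents:
            image_list.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     tag_ids=[tag_id]))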
ENDPOINT = "https://packimagerecogniti-prediction.cognitiveservices.azure.com/"

# Replace with a valid key
training_key = "1d09912649e14a70a777687237b4e39d"
prediction_key = "8a6e3ffc93d84b6ab547d944612e8b9c"
prediction_resource_id = "/subscriptions/ba66f898-5c3d-473b-953e-fefd816b4264/resourceGroups/packing/providers/Microsoft.CognitiveServices/accounts/packimagerecogniti-Prediction"

publish_iteration_name = "classifyMyChildren"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("ClassifyMyChildren")

# Make two tags in the new project

Danny_tag = trainer.create_tag(project.id, "Danny")
Joshua_tag = trainer.create_tag(project.id, "Joshua")

base_image_url = "C:\\Users\\blessonj\\Python\\"

print("Adding images...")

image_list = []
batching_list = []

for image_num in range(1, 12):
    file_name = "Danny_{}.jpg".format(image_num)
Example #20
# ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
# plt.show()

ENDPOINT = "https://dataminingasgfour-prediction.cognitiveservices.azure.com/"
training_key = "08acd5cf3c8948829673c53d930d2f50"
prediction_key = "dfe4b7746ad74c1f98b130d7bfc5048a"
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)


#constants for the prediction model and tags
#libraries needed to import
import urllib
from bs4 import BeautifulSoup
import requests
project = trainer.create_project("TrumpAnalytics")
prediction_resource_id = "/subscriptions/31dad0b5-b624-4e49-815e-51e72ad9f6e4/resourceGroups/GeorgianCollege/providers/Microsoft.CognitiveServices/accounts/DataMiningAsgFour-Prediction"
publish_iteration_name = "classifyModel"
image_list = []

#First we train our model to predict if Trump is in an image

#collecting and counting the number of training images
training_image_url = r"https://github.com/robertroutledge/DataMiningFinalAssignment/raw/master/Python%20Code%20for%20Analysis/TrumpImageLibrary/"
page = requests.get(training_image_url)
soup = BeautifulSoup(page.text,'html.parser')
soupstring = str(soup)
num_training_images = soupstring.count("Box-row Box-row--focus-gray py-2 d-flex position-relative js-navigation-item")
trump_tag = trainer.create_tag(project.id, "Trump")

#iterates through the images, adds a tag
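
The snippet is cut off before the upload loop this comment describes. A sketch using URL entries, where the trump_<n>.jpg file-name pattern and the ImageUrlCreateEntry / ImageUrlCreateBatch imports are pure assumptions, could look like:

url_list = []
for image_num in range(1, num_training_images + 1):
    image_url = training_image_url + "trump_{}.jpg".format(image_num)
    url_list.append(ImageUrlCreateEntry(url=image_url, tag_ids=[trump_tag.id]))

upload_result = trainer.create_images_from_urls(
    project.id, ImageUrlCreateBatch(images=url_list))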
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
# </snippet_auth>

# <snippet_create>
publish_iteration_name = "detectModel"

# Find the object detection domain
obj_detection_domain = next(
    domain for domain in trainer.get_domains()
    if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print("Creating project...")
# Use uuid to avoid project name collisions.
project = trainer.create_project(str(uuid.uuid4()),
                                 domain_id=obj_detection_domain.id)
# </snippet_create>

# <snippet_tags>
# Make two tags in the new project
fork_tag = trainer.create_tag(project.id, "fork")
scissors_tag = trainer.create_tag(project.id, "scissors")
# </snippet_tags>

# <snippet_tagging>
fork_image_regions = {
    "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
    "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
    "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
    "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
    "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
    print(project.id)
    print(project.description)


# In[7]:


Project = trainer.get_project(project_id=project_id)


# ## Create the destination Project 

# In[8]:


dest_Project = dest_trainer.create_project(dest_project_new_name, domain_id=dest_classification_domain.id)


# In[9]:


for project in dest_trainer.get_projects():
    print(project.name)
    print(project.id)
    print(project.description)


# ## Get the tags on origin project and create same tags on destination project

# In[10]:
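
The code cell for this step is missing above. A minimal sketch that copies every tag from the origin project to the destination project, using the clients and project objects already defined, would be:

dest_tags = {}
for tag in trainer.get_tags(Project.id):
    dest_tags[tag.name] = dest_trainer.create_tag(dest_Project.id, tag.name)
    print("Created destination tag:", tag.name)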
ENDPOINT = "https://canetoadmachinelearning.cognitiveservices.azure.com/"

# Replace with a valid key
training_key = "cde7deba2d5d4df5b768b50b700c46b7"
prediction_key = "fb49a542a16a47e6b68b2983db158c32"
prediction_resource_id = "/subscriptions/baa59b08-5ec4-44ea-a907-b12782d8e2a0/resourceGroups/Canetoads/providers/Microsoft.CognitiveServices/accounts/CaneToadMachineLea-Prediction"

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("Cane Toad Classifier Python")

# Make two tags in the new project
canetoad_tag = trainer.create_tag(project.id, "cane toad")
frog_tag = trainer.create_tag(project.id, "frog")

base_image_url = "alaPhotos/"

print("Adding images...")

image_list = []

for image_num in range(0, 5):
    file_name = "canetoad_{}.jpg".format(image_num)
    with open(base_image_url + "canetoad/" + file_name,
              "rb") as image_contents:
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry

# Endpoint for the training service
apiEndpoint = "https://southcentralus.api.cognitive.microsoft.com"

# The training and prediction keys generated for us
tKey = "c3a53a4fb5f24137a179f0bcaf7754a5"
pKey = "bf7571576405446782543f832b038891"

# Create the trainer client object. The first parameter is the training_key,
# the second is the Cognitive Services endpoint address
coach_Rives = CustomVisionTrainingClient(tKey, endpoint=apiEndpoint)

# Create the project
print("Creating the Lego project")
legoProject = coach_Rives.create_project("Agent_Leggooo")  # our project's name

# Now we create a few test tags and upload various photos under them
technic = coach_Rives.create_tag(legoProject.id, "technic")
city = coach_Rives.create_tag(legoProject.id, "city")

# The tags below are commented out for now. We could enable them and add them to the tag_ids list in the create_images_from_files calls.
# However, the Vision service requires at least five photos per tag; since I couldn't put together such a set, I went ahead with only two tags.
'''
helicopter = coach_Rives.create_tag(legoProject.id, "helicopter")
truck = coach_Rives.create_tag(legoProject.id, "truck")
yellow = coach_Rives.create_tag(legoProject.id, "yellow")
plane = coach_Rives.create_tag(legoProject.id, "plane")
car = coach_Rives.create_tag(legoProject.id, "car")
racecar = coach_Rives.create_tag(legoProject.id, "racecar")
f1car = coach_Rives.create_tag(legoProject.id, "f1car")
Example #25
# Custom vision endpoint
custom_vision_endpoint = "https://eastus.api.cognitive.microsoft.com/"

vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials)

person_group_id = 'reactor'

publish_iteration_name = "classifyModel"

credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(custom_vision_endpoint, credentials)

# Create a new project
print("Creating project...")
project = trainer.create_project("CV Project")

# Make two tags in the new project
cruise = trainer.create_tag(project.id, "cruise")
other = trainer.create_tag(project.id, "other")

# Add the base path to the downloaded image files
base_image_url = ""

print("Adding images...")

# Image list to save images
image_list = []

# Add path in from the base_image_url where you have the cruise ship images folder
directory = os.fsencode(base_image_url + "cruise/")
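
The snippet stops before the upload loop. A minimal sketch, assuming the "cruise" folder holds image files directly and that ImageFileCreateEntry is imported from the Custom Vision models, might be:

for file in os.listdir(directory):
    file_name = os.fsdecode(file)
    with open(base_image_url + "cruise/" + file_name, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 tag_ids=[cruise.id]))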
Example #26
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry

cv_endpoint = "<INSERT CUSTOM VISION ENDPOINT>"
training_key = "<INSERT TRAINING KEY>"
training_images = "<LOCATION FOR THE TRAINING IMAGES>"

# Create a training client
trainer = CustomVisionTrainingClient(training_key, endpoint=cv_endpoint)

# List all available domains
for domain in trainer.get_domains():
    print(domain.id, "\t", domain.name)

# Create a project
project = trainer.create_project("Lego - Simpsons - v1",
                                 "0732100f-1a38-4e49-a514-c9b44c697ab5")

# Create the tags and add the images to a list
image_list = []
directories = os.listdir(training_images)

for tagName in directories:
    tag = trainer.create_tag(project.id, tagName)
    images = os.listdir(os.path.join(training_images, tagName))
    for img in images:
        with open(os.path.join(training_images, tagName, img),
                  "rb") as image_contents:
            image_list.append(
                ImageFileCreateEntry(name=img,
                                     contents=image_contents.read(),
                                     tag_ids=[tag.id]))
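
A possible continuation of the snippet above, not part of the original: upload the collected entries in batches of at most 64 (the service limit) and start a training iteration, using the older client signature already used in this snippet.

for i in range(0, len(image_list), 64):
    trainer.create_images_from_files(project.id, images=image_list[i:i + 64])

print("Training...")
iteration = trainer.train_project(project.id)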
Example #27
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry

ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com/"

# Replace with a valid key
training_key = "edb35a52aa04460d8edcffc78893ec9c3"
prediction_key = "<your prediction key>"
prediction_resource_id = "/subscriptions/b580d1b7-163e-4a91-bb43-1d260d3753a5/resourceGroups/mygroup/providers/Microsoft.CognitiveServices/accounts/Mol_Images"

publish_iteration_name = "model1"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Create a new project
print("Creating project...")
project = trainer.create_project("OPVDB_classification")

# Make tags in the new project
mol_tag = trainer.create_tag(project.id, "mol")
curve_tag = trainer.create_tag(project.id, "curve")
# uvvis_tag = trainer.create_tag(project.id, "uvvis")
imaging_tag = trainer.create_tag(project.id, "imaging")

# To add the sample images to the project, insert the following code after the tag creation.
# This code uploads each image with its corresponding tag.
# You can upload up to 64 images in a single batch.
base_image_url = "train_img/"

print("Adding images...")

image_list = []
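
The upload code the comment above refers to is cut off. A minimal sketch, assuming one sub-folder per tag under train_img/, no more than 64 images in total, and that os is imported elsewhere in the script, might be:

for tag, folder in [(mol_tag, "mol"), (curve_tag, "curve"), (imaging_tag, "imaging")]:
    for file_name in os.listdir(os.path.join(base_image_url, folder)):
        with open(os.path.join(base_image_url, folder, file_name), "rb") as image_contents:
            image_list.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     tag_ids=[tag.id]))

upload_result = trainer.create_images_from_files(project.id, images=image_list)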
Example #28
import time

ENDPOINT = "https://australiaeast.api.cognitive.microsoft.com/"

# Replace with a valid key
training_key = "2382a5811bf34c099f3d8385d64d13da"
prediction_key = "dcf92e4f48224955a770e3e6e7390e3d"
prediction_resource_id = "/subscriptions/90ac3555-f817-4728-8e02-c5e28a30f349/resourceGroups/test/providers/Microsoft.CognitiveServices/accounts/test_prediction"

publish_iteration_name = "classifyModel"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

# Create a new project
print("Creating project...")
project = trainer.create_project("AR-lab")

oscilloscope_tag = trainer.create_tag(project.id, "oscilloscope")
powersupply_tag = trainer.create_tag(project.id, "powersupply")
multimeter_tag = trainer.create_tag(project.id, "multimeter")
waveformgenerator_tag = trainer.create_tag(project.id, "waveformgenerator")
phoebe_tag = trainer.create_tag(project.id, "phoebe")
base_image_url = os.getcwd() + "/"

print("Adding images...")

print("Uploading oscilloscope and power supply")
image_list = []

for image_num in range(0, 16):
    file_name = "oscilloscope{}.jpg".format(image_num)
Example #29
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(
    in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

publish_iteration_name = "detectModel"

# Find the object detection domain
obj_detection_domain = next(
    domain for domain in trainer.get_domains()
    if domain.type == "ObjectDetection" and domain.name == "General")

# Create a new project
print("Creating project...")
project = trainer.create_project("My Detection Project",
                                 domain_id=obj_detection_domain.id)

# Make two tags in the new project
fork_tag = trainer.create_tag(project.id, "fork")
scissors_tag = trainer.create_tag(project.id, "scissors")

fork_image_regions = {
    "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
    "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
    "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
    "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
    "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
    "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
    "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
    "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
    "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
Example #30
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)

# print(connect_str)
blob_service_client = BlobServiceClient.from_connection_string(connect_str)

base_image_url = "https://originaldataset.blob.core.windows.net/"

# print(list(blob_service_client.list_containers()))
ambulance_container = blob_service_client.get_container_client("ambulance")
bench_container = blob_service_client.get_container_client("bench")

# Create a new project
print("Creating project...")
project = trainer.create_project("drawings")

# Make two tags in the new project
bench_tag = trainer.create_tag(project.id, "bench")
ambulance_tag = trainer.create_tag(project.id, "ambulance")
print(ambulance_tag.id)

print("Adding images...")
url_list = []

for blob in ambulance_container.list_blobs():
    blob_name = blob.name
    blob_url = f"{base_image_url}ambulance/{blob_name}"
    url_list.append(
        ImageUrlCreateEntry(url=blob_url, tag_ids=[ambulance_tag.id]))
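
A possible continuation of the last snippet, not part of the original: collect the bench blobs the same way, then upload the URL entries in batches of at most 64. The ImageUrlCreateBatch import is assumed to sit alongside ImageUrlCreateEntry.

for blob in bench_container.list_blobs():
    blob_url = f"{base_image_url}bench/{blob.name}"
    url_list.append(ImageUrlCreateEntry(url=blob_url, tag_ids=[bench_tag.id]))

for i in range(0, len(url_list), 64):
    upload_result = trainer.create_images_from_urls(
        project.id, ImageUrlCreateBatch(images=url_list[i:i + 64]))
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")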