Example #1
    def process_batch(images):
        """
        Uploads a batch of images to Custom Vision.
        """
        batch = ImageFileCreateBatch(images=images)
        # Limited to 64 images and 20 tags per batch.
        upload_result = trainer.create_images_from_files(project_id=project_id,
                                                         batch=batch)

        num_duplicates = 0
        error_results = []
        for result in upload_result.images:
            if result.status == ImageCreateStatus.ok_duplicate:  # Ignore errors indicating duplicate images
                num_duplicates += 1
            elif result.status != ImageCreateStatus.ok:
                error_results.append(result)

        if not upload_result.is_batch_successful and len(error_results) > 0:
            message = "Batch did not upload successfully!"
            for result in error_results:
                message += f"""
                url: {result.source_url}
                -- status: {result.status}
                """
            raise Exception(message)
        elif num_duplicates > 0:
            print(
                f"Batch uploaded successfully. Ignoring {num_duplicates} duplicates."
            )
        else:
            # print("Batch uploaded successfully.")
            pass
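A minimal driver sketch for this helper (hypothetical: all_entries stands for the full list of ImageFileCreateEntry objects built elsewhere) that feeds it in chunks of at most 64 images, matching the batch limit noted in the comment:

# Hypothetical driver for process_batch, chunking uploads at the 64-image limit.
BATCH_SIZE = 64
for start in range(0, len(all_entries), BATCH_SIZE):
    process_batch(all_entries[start:start + BATCH_SIZE])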
Example #2
def main():
    """
    Training for object detection with Azure Custom Vision
    """
    args = parse_args()
    with open(args.config, "r") as config_file:
        config = json.load(config_file)
    credentials = ApiKeyCredentials(in_headers={"Training-key": config["training_key"]})
    trainer = CustomVisionTrainingClient(config["ENDPOINT"], credentials)

    print("Creating project...")

    # Find the object detection domain
    obj_detection_domain = next(
        domain
        for domain in trainer.get_domains()
        if domain.type == "ObjectDetection" and domain.name == "General"
    )
    project = trainer.create_project(
        config["project_name"], domain_id=obj_detection_domain.id
    )

    # ======================================================================================

    print("Adding images...")
    image_folder = config["image_folder"]
    with open("annotation.json", "r") as annotation_file:
        annotations = json.load(annotation_file)
    tagged_images_with_regions = []
    for label in annotations.keys():
        tagged_images_with_regions += add_image(
            trainer, label, project.id, annotations[label], image_folder
        )

    upload_result = trainer.create_images_from_files(
        project.id, ImageFileCreateBatch(images=tagged_images_with_regions)
    )
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)

    # ======================================================================================
    print("Training...")
    publish_iteration_name = config["publish_iteration_name"]
    prediction_resource_id = config["prediction_resource_id"]
    iteration = trainer.train_project(project.id)
    while iteration.status != "Completed":
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Publish it to the project endpoint
    trainer.publish_iteration(
        project.id, iteration.id, publish_iteration_name, prediction_resource_id
    )
    print("Done!")
Example #3
def Upload_Images(folder):
    print("Uploading images...")

    # Get the tags defined in the project
    tags = training_client.get_tags(custom_vision_project.id)

    # Create a list of images with tagged regions
    tagged_images_with_regions = []

    # Get the images and tagged regions from the JSON file
    with open('tagged-images.json', 'r') as json_file:
        tagged_images = json.load(json_file)
        for image in tagged_images['files']:
            # Get the filename
            file = image['filename']
            # Get the tagged regions
            regions = []
            for tag in image['tags']:
                tag_name = tag['tag']
                # Look up the tag ID for this tag name
                tag_id = next(t for t in tags if t.name == tag_name).id
                # Add a region for this tag using the coordinates and dimensions in the JSON
                regions.append(
                    Region(tag_id=tag_id,
                           left=tag['left'],
                           top=tag['top'],
                           width=tag['width'],
                           height=tag['height']))
            # Add the image and its regions to the list
            with open(os.path.join(folder, file), mode="rb") as image_data:
                tagged_images_with_regions.append(
                    ImageFileCreateEntry(name=file,
                                         contents=image_data.read(),
                                         regions=regions))

    # Upload the list of images as a batch
    upload_result = training_client.create_images_from_files(
        custom_vision_project.id,
        ImageFileCreateBatch(images=tagged_images_with_regions))
    # Check for failure
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)
    else:
        print("Images uploaded.")
Example #4
    def _upload_one_batch_training_images(self, tagged_images_with_regions):
        """
        Upload one batch (maximum 64) training images to Azure Custom Vision Object Detection.
        Only for internal use within this class.
        
        Args:
        ----
        tagged_images_with_regions: list of ImageFileCreateEntry 
        
        """

        upload_result = self.trainer.create_images_from_files(
            self.project_id,
            ImageFileCreateBatch(images=tagged_images_with_regions))

        if not upload_result.is_batch_successful:
            print("Image batch upload failed.")
            for image in upload_result.images:
                print("Image status: ", image.status)

        return
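A sketch of how a caller in the same class might respect the 64-image cap when it has more entries than one batch allows (entries is a hypothetical list of ImageFileCreateEntry objects):

# Hypothetical caller inside the same class: upload a long list of
# ImageFileCreateEntry objects in batches of at most 64.
for start in range(0, len(entries), 64):
    self._upload_one_batch_training_images(entries[start:start + 64])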
Example #5
    def main(self) -> None:
        dataset_path = os.path.join(os.path.dirname(__file__),
                                    "../../../dataset")

        existing_image_count = self.trainer.get_image_count(
            project_id=self.project.id)
        file_number = existing_image_count
        self.files_to_upload = self.files_to_upload[file_number:]

        for file_name in self.files_to_upload:
            tagged_images_with_regions = []

            annotations: list = self._read_annotation_file(
                annotation_path=os.path.join(dataset_path, "annotations",
                                             file_name + ".txt"), )
            image_bytes: bytes = read_and_resize_image(
                image_path=os.path.join(dataset_path, "images",
                                        file_name + ".JPG"),
                max_byte_size=self.max_byte_size,
            )
            print(f"Image {file_name} is {len(image_bytes)} bytes")
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_bytes,
                                     regions=annotations))
            print("Upload images...")
            upload_result = self.trainer.create_images_from_files(
                self.project.id,
                ImageFileCreateBatch(images=tagged_images_with_regions))
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)
                exit(-1)
            print(
                f"Uploaded file number {file_number+1} of {len(self.files_to_upload)}"
            )
            file_number += 1
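The loop above uploads a single image per create_images_from_files call; a hedged sketch of a variant that groups entries into batches of up to 64 (the per-batch cap mentioned in the other snippets), assuming it runs inside the same class and that tagged_entries is a pre-built list of ImageFileCreateEntry objects:

# Hypothetical variant: accumulate entries and flush them in groups of up to 64
# instead of uploading one image per call. Error handling omitted for brevity.
pending = []
for entry in tagged_entries:  # tagged_entries: hypothetical pre-built entry list
    pending.append(entry)
    if len(pending) == 64:
        self.trainer.create_images_from_files(
            self.project.id, ImageFileCreateBatch(images=pending))
        pending = []
if pending:
    self.trainer.create_images_from_files(
        self.project.id, ImageFileCreateBatch(images=pending))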
Example #6
for file_name in scissors_image_regions.keys():
    x, y, w, h = scissors_image_regions[file_name]
    regions = [
        Region(tag_id=scissors_tag.id, left=x, top=y, width=w, height=h)
    ]

    with open(base_image_location + "images/scissors/" + file_name + ".jpg",
              mode="rb") as image_contents:
        tagged_images_with_regions.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 regions=regions))

upload_result = trainer.create_images_from_files(
    project.id, ImageFileCreateBatch(images=tagged_images_with_regions))
if not upload_result.is_batch_successful:
    print("Image batch upload failed.")
    for image in upload_result.images:
        print("Image status: ", image.status)
    exit(-1)

print("Training...")
iteration = trainer.train_project(project.id)
while iteration.status != "Completed":
    iteration = trainer.get_iteration(project.id, iteration.id)
    print("Training status: " + iteration.status)
    time.sleep(1)

# The iteration is now trained. Publish it to the project endpoint
trainer.publish_iteration(project.id, iteration.id, publish_iteration_name,
                          prediction_resource_id)
Example #7
for image_num in range(1, 11):
    file_name = "hemlock_{}.jpg".format(image_num)
    with open(base_image_location + "images/Hemlock/" + file_name,
              "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 tag_ids=[hemlock_tag.id]))

for image_num in range(1, 11):
    file_name = "japanese_cherry_{}.jpg".format(image_num)
    with open(base_image_location + "images/Japanese Cherry/" + file_name,
              "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 tag_ids=[cherry_tag.id]))

upload_result = trainer.create_images_from_files(
    project.id, ImageFileCreateBatch(images=image_list))
if not upload_result.is_batch_successful:
    print("Image batch upload failed.")
    for image in upload_result.images:
        print("Image status: ", image.status)
    exit(-1)
# </snippet_upload>

# <snippet_train>
print("Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
    iteration = trainer.get_iteration(project.id, iteration.id)
    print("Training status: " + iteration.status)
    time.sleep(1)
print("Adding images...")

image_list = []

for image_num in range(1, 11):
    file_name = "hemlock_{}.jpg".format(image_num)
    with open(base_image_location + "images/Hemlock/" + file_name, "rb") as image_contents:
        image_list.append(ImageFileCreateEntry(name=file_name, contents=image_contents.read(), tag_ids=[hemlock_tag.id]))

for image_num in range(1, 11):
    file_name = "japanese_cherry_{}.jpg".format(image_num)
    with open(base_image_location + "images/Japanese Cherry/" + file_name, "rb") as image_contents:
        image_list.append(ImageFileCreateEntry(name=file_name, contents=image_contents.read(), tag_ids=[cherry_tag.id]))

upload_result = trainer.create_images_from_files(project.id, ImageFileCreateBatch(images=image_list))
if not upload_result.is_batch_successful:
    print("Image batch upload failed.")
    for image in upload_result.images:
        print("Image status: ", image.status)
    exit(-1)
# </snippet_upload>


# <snippet_train>
print ("Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
    iteration = trainer.get_iteration(project.id, iteration.id)
    print ("Training status: " + iteration.status)
    time.sleep(1)
Example #9
        dir_ = os.path.join(base_image_location, label_, mode_)
        names_ = os.listdir(dir_)
        names_ = [os.path.join(dir_, nm_) for nm_ in names_]
        names_ = [nm_ for nm_ in names_ if os.path.isfile(nm_)]
        filenames[label_][mode_] = names_
        for nm_ in names_:
            with open(nm_, 'rb') as img_:
                images[label_][mode_].append(
                    ImageFileCreateEntry(name=nm_,
                                         contents=img_.read(),
                                         tag_ids=[tag_.id]))

# Upload the training data to Azure
upload_result = trainer.create_images_from_files(
    project.id,
    ImageFileCreateBatch(images=images['seamus']['train'] +
                         images['finley']['train']))
if not upload_result.is_batch_successful:
    print("Image batch upload failed.")
    for image in upload_result.images:
        print("Image status: ", image.status)
    exit(-1)

# Train! It's as simple as that. This might take a few minutes...
print("Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
    iteration = trainer.get_iteration(project.id, iteration.id)
    print("Training status: " + iteration.status)
    print("Waiting 10 seconds...")
    time.sleep(10)
Example #10
# Positive-class images (the args key is assumed to mirror args["noncovid"] below)
for filename in os.listdir(args["covid"]):
    img_path = os.path.sep.join([args["covid"], filename])
    with open(img_path, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=filename,
                                 contents=image_contents.read(),
                                 tag_ids=[positive_tag.id]))

for filename in os.listdir(args["noncovid"]):
    img_path = os.path.sep.join([args["noncovid"], filename])
    with open(img_path, "rb") as image_contents:
        image_list.append(
            ImageFileCreateEntry(name=filename,
                                 contents=image_contents.read(),
                                 tag_ids=[negative_tag.id]))

# Azure allows uploading 64 images per batch at once
for i in range(0, len(image_list), 64):
    print("[INFO] Uploading batch: ", i)
    batch = image_list[i:i + 64]
    upload_result = trainer.create_images_from_files(
        project.id, ImageFileCreateBatch(images=batch))
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)

# Train model and publish
print("[INFO] Training...")
iteration = trainer.train_project(project.id)
while (iteration.status != "Completed"):
    iteration = trainer.get_iteration(project.id, iteration.id)
    print("[INFO] Training status: " + iteration.status)
    time.sleep(1)

# The iteration is now trained. Publish it to the project endpoint
trainer.publish_iteration(project.id, iteration.id, publish_iteration_name,
                          prediction_resource_id)
Example #11
        if classification == 'with_mask':
            mask_list.append(name)
            image_list.append(
                ImageFileCreateEntry(name=name + '.png',
                                     contents=img_f.read(),
                                     tag_ids=[mask_tag.id]))
        elif classification == 'without_mask':
            no_mask_list.append(name)
            image_list.append(
                ImageFileCreateEntry(name=name + '.png',
                                     contents=img_f.read(),
                                     tag_ids=[nomask_tag.id]))

print(f"\t{len(mask_list)} photos with mask")
print(f"\t{len(no_mask_list)} photos without mask")

print("Uploading tagged images...")

# Split the image list into smaller portions because a batch can contain at most 64 images
for batch_idx in range(0, len(image_list), 64):
    print(f"\tUploading batch number {int(batch_idx / 64) + 1}...")
    upload_result = trainer.create_images_from_files(
        project.id,
        ImageFileCreateBatch(images=image_list[batch_idx:batch_idx + 64]))
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")
        for image in upload_result.images:
            print("Image status: ", image.status)

print("Upload finished.")