Example #1
def check_tags(trainer: CustomVisionTrainingClient) -> None:
    t = ['none', 'Bit', 'Doug', 'Penny']
    etags = {t.name: t for t in trainer.get_tags(projectId)}
    for tag in t:
        if tag in etags:
            tags[tag] = etags[tag]
        else:
            tags[tag] = trainer.create_tag(projectId, tag)
Example #2
def check_tags(trainer: CustomVisionTrainingClient) -> None:
    t = ['touching-face', 'not-touching-face', 'no-face-present']
    etags = {t.name: t for t in trainer.get_tags(projectId)}
    for tag in t:
        if tag in etags:
            tags[tag] = etags[tag]
        else:
            tags[tag] = trainer.create_tag(projectId, tag)
Example #3
def check_tags(trainer: CustomVisionTrainingClient) -> None:
    t = ['none', 'rock', 'paper', 'scissors', 'lizard', 'spock']
    etags = {t.name: t for t in trainer.get_tags(projectId)}
    for tag in t:
        if tag in etags:
            tags[tag] = etags[tag]
        else:
            tags[tag] = trainer.create_tag(projectId, tag)
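The three snippets above assume a module-level Custom Vision client plus projectId and tags globals that are not shown. A minimal sketch of that surrounding setup, with placeholder endpoint, key, and project id (not taken from the original examples) and the newer SDK signature used in Example #4 and later examples:

from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from msrest.authentication import ApiKeyCredentials

credentials = ApiKeyCredentials(in_headers={"Training-key": "<training-key>"})  # placeholder key
trainer = CustomVisionTrainingClient("<endpoint>", credentials)  # placeholder endpoint

projectId = "<project-id>"  # module-level project id read by check_tags()
tags = {}                   # module-level cache of tag name -> Tag, filled by check_tags()

check_tags(trainer)
print(sorted(tags.keys()))  # tag names now cached as Tag objects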
Example #4
def getPredictionBatch(ENDPOINT, publish_iteration_name, prediction_key,
                       prediction_resource_id, file, training_key,
                       project_name):
    # Now there is a trained endpoint that can be used to make a prediction
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    training_credentials = ApiKeyCredentials(
        in_headers={"Training-key": training_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    trainer = CustomVisionTrainingClient(ENDPOINT, training_credentials)
    projects = trainer.get_projects()

    res_batch = {}

    # Retrieve the object detection project and its tags
    # Currently assumes one tag
    for p in projects:
        if p.name == project_name:
            project = trainer.get_project(p.id)
            tags = trainer.get_tags(project.id)
            print('Project Found')

    for url in file:
        info = url.split()

        name = info[0]
        url = info[1]
        try:
            response = requests.get(url)
        except:
            print("error retrieving image: " + url)
            exit(-1)
        # Open the sample image and get back the prediction results.
        results = predictor.detect_image(project.id, publish_iteration_name,
                                         response.content)

        # Collect the results for this image, grouped by tag name.
        js_res = {}
        for prediction in results.predictions:

            x = {
                "confidence": "{0:.2f}%".format(prediction.probability * 100),
                "bbox_left": "{0:.2f}".format(prediction.bounding_box.left),
                "bbox_right": "{0:.2f}".format(prediction.bounding_box.top),
                "bbox_width": "{0:.2f}".format(prediction.bounding_box.width),
                "bbox_height": "{0:.2f}".format(prediction.bounding_box.height)
            }

            x = json.dumps(x)
            js_res.setdefault(prediction.tag_name, []).append(x)
        res_batch[name] = js_res

    return res_batch
Example #5
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info(
        'ML Professoar HTTP trigger function AddLabeledDataClient processed a request.'
    )

    try:
        # retrieve the parameters from the multi-part form http request
        image_url = req.form.get('ImageUrl')
        image_labeling_json = req.form.get('DataLabels')
    except:
        return func.HttpResponse(
            "Please pass JSON containing the labeled regions associated with this image on the query string or in the request body.",
            status_code=400)

    # validate both parameters were passed into the function
    if image_url and image_labeling_json:
        labels = []
        count_of_labels_applied_to_image = 0

        endpoint = os.environ['ClientEndpoint']

        # Get Cognitive Services Environment Variables
        project_id = os.environ["ProjectID"]
        training_key = os.environ['TrainingKey']

        # load labeled image regions passed in request into dictionary
        image_labeling_data = json.loads(image_labeling_json)

        # instantiate custom vision client
        trainer = CustomVisionTrainingClient(training_key, endpoint=endpoint)

        # retrieve tags from project and loop through them to find tag ids for tags that need to be applied to the image
        tags = trainer.get_tags(project_id)
        image_label = image_labeling_data["label"]
        for tag in tags:
            if tag.name == image_label:
                labels.append(tag.id)
                count_of_labels_applied_to_image = count_of_labels_applied_to_image + 1
                break

        # create the image from a url and attach the appropriate tags as labels.
        upload_result = trainer.create_images_from_urls(
            project_id, [ImageUrlCreateEntry(url=image_url, tag_ids=labels)])
        if upload_result.is_batch_successful:
            return func.HttpResponse(
                str(count_of_labels_applied_to_image) +
                " Tag(s) applied to image at url: " + image_url)
        else:
            return func.HttpResponse("Upload of " + image_url + " failed.")

    else:
        return func.HttpResponse(
            "Please pass a URL to a blob file containing the image to be added to training in this request on the query string.",
            status_code=400)
Example #6
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info('Python HTTP trigger function processed a request.')

    try:
        data = req.get_json()
        farmid, runid = data['farmid'], data['runid']
        feedback_list = data['feedback']

        #Decide on the correct split
        for feedback in feedback_list:
            feedback["split"] = "train" if random.random() < 0.5 else "dev"

        #Get Project ID and CustomVision Trainer
        logging.info("Get Project with Project ID : " +
                     CVTrainConstants.PROJECT_ID)
        credentials = ApiKeyCredentials(
            in_headers={"Training-key": CVTrainConstants.TRAINING_KEY})
        trainer = CustomVisionTrainingClient(CVTrainConstants.ENDPOINT,
                                             credentials)

        #Get list of tags in the current project
        tag_dict = {}
        for tag in trainer.get_tags(
                CVTrainConstants.PROJECT_ID):  ##Can modify dictionary later
            tag_dict[tag.name] = tag.id

        train_image_list = add_train_dev_images(farmid, runid, feedback_list,
                                                tag_dict)
        #train_image_list = add_train_dev_images("1", "2", [{"grid_id": "3", "label": "Charlock", "split": "dev"}, \
        #{"grid_id": "7", "label": "Fat Hen", "split": "train"}], tag_dict)

        #Upload to Custom Vision with new tags from train
        if len(train_image_list) != 0:  # IF not empty
            upload_result = trainer.create_images_from_urls(
                CVTrainConstants.PROJECT_ID, images=train_image_list)
            if not upload_result.is_batch_successful:
                logging.error("Image batch upload failed.")
            else:
                logging.info("Image batch upload Success")

        best_iteration(trainer)

    except Exception as e:
        logging.error(
            "Exception while trying to get data from HTTP Request: " + str(e))
        return func.HttpResponse(f"Failure !")

    return func.HttpResponse(f"Trained Sucessfully")
Example #7
def get_tags_for_project(angle):
    trainer = CustomVisionTrainingClient(prediction_key, endpoint=endpoint)

    project_id = None
    if angle == 'wet-head-right':
        project_id = config.HEAD_RIGHT
    if angle == 'wet-head-left':
        project_id = config.HEAD_LEFT
    if angle == 'bottling-left':
        project_id = config.BOTTLING_LEFT
    if angle == 'bottling-straight':
        project_id = config.BOTTLING_STRAIGHT
    if angle == 'bottling-right':
        project_id = config.BOTTLING_RIGHT

    results = trainer.get_tags(project_id)

    return results
Example #8
def upload_images(training_key):
    trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains() if domain.type == "ObjectDetection")

    print("Creating project...")
    try:
        project = trainer.create_project("LEGO Vision", domain_id=obj_detection_domain.id)
    except HttpOperationError:
        print("Project already exists. Using this one.")
        project = trainer.get_project(project_id="71548120-925d-4e59-ba7e-32f99de50240")

    classes = os.path.join(BASE_DIRECTORY, "class_map.txt")
    tags = dict()
    # Make two tags in the new project
    for _class in list(map(lambda line: line.split('\t')[0], open(classes).readlines())):
        try:
            tags[_class] = trainer.create_tag(project.id, _class)
        except HttpOperationError:
            print("Tag already created, continuing...")
            for tag in trainer.get_tags(project_id="71548120-925d-4e59-ba7e-32f99de50240"):
                tags[tag.name] = tag

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for image_path in glob.glob(os.path.join(IMAGES_FOLDER, "*.jpg")):
        file_id, extension = image_path.split(".", 1)
        image = cv2.imread(image_path)
        bboxes = read_bboxes(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.tsv"), scale=1, padding=(0, 0, 0, 0))
        labels = read_labels(os.path.join(IMAGES_FOLDER, file_id + ".bboxes.labels.tsv"))
        regions = [Region(tag_id=tags[_class].id, left=bbox[0] / image.shape[1], top=bbox[1] / image.shape[0],
                          width=abs(bbox[0] - bbox[2]) / image.shape[1], height=abs(bbox[1] - bbox[3]) / image.shape[0])
                   for _class, bbox in zip(labels,
                                           bboxes)]
        with open(image_path, mode="rb") as image_contents:
            tagged_images_with_regions.append(ImageFileCreateEntry(name=file_id, contents=image_contents.read(),
                                                                   regions=regions))
    print("Azure Custom Vision can only accept images in batches of max 64 per batch. Cutting list up in batches..")
    for batch in chunks(tagged_images_with_regions, 64):
        trainer.create_images_from_files(project.id, images=batch, tag_ids=[tag.id for tag in tags.values()])
    print("Finished adding images. Visit customvision.ai to start training via the GUI.")
Example #9
    def add_label_if_not_exists(self, trainer: CustomVisionTrainingClient, project: Project, tag_name: str) -> Tag:
        """
        Adds a tag to the cache

        :param trainer: The project trainer
        :type: azure.cognitiveservices.vision.customvision.training.CustomVisionTrainingClient
        :param project: the Custom Vision project
        :type: string
        :param tag_name: the name of the tag to add
        :type: azure.cognitiveservices.vision.customvision.training.models.Project
        :returns: string
        """
        if len(self.tag_dictionary) == 0:
            tags=trainer.get_tags(project.id)
            for tag in tags:
                self.tag_dictionary[tag.name] = tag

        if tag_name not in self.tag_dictionary:
            self.tag_dictionary[tag_name] = trainer.create_tag(project.id, tag_name)

        return self.tag_dictionary[tag_name]
Example #10
def getPrediction(ENDPOINT, publish_iteration_name, prediction_key,
                  prediction_resource_id, img, training_key, project_name):

    # Now there is a trained endpoint that can be used to make a prediction
    prediction_credentials = ApiKeyCredentials(
        in_headers={"Prediction-key": prediction_key})
    training_credentials = ApiKeyCredentials(
        in_headers={"Training-key": training_key})
    predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)
    trainer = CustomVisionTrainingClient(ENDPOINT, training_credentials)
    projects = trainer.get_projects()

    # Retrieve the object detection project and its tags
    # Currently assumes one tag
    for p in projects:
        if p.name == project_name:
            project = trainer.get_project(p.id)
            tags = trainer.get_tags(project.id)
            print('Project Found')

    # Open the sample image and get back the prediction results.
    results = predictor.detect_image(project.id, publish_iteration_name, img)
    drawBounds(img, results.predictions)

    # Display the results.
    js_res = defaultdict(list)
    for prediction in results.predictions:
        print(prediction)

        x = {
            "confidence": "{0:.2f}%".format(prediction.probability * 100),
            "bbox_left": "{0:.2f}".format(prediction.bounding_box.left),
            "bbox_right": "{0:.2f}".format(prediction.bounding_box.top),
            "bbox_width": "{0:.2f}".format(prediction.bounding_box.width),
            "bbox_height": "{0:.2f}".format(prediction.bounding_box.height)
        }

        js_res[prediction.tag_name].append(x)

    return js_res
Example #11
def main(secrets, directory):
    with open(secrets) as f:
        keys = json.load(f)

    print(keys.keys())

    projectid = keys['projectid']
    endpoint = keys['endpoint']
    key = keys['key']

    credentials = ApiKeyCredentials(in_headers={"Training-key": key})
    trainer = CustomVisionTrainingClient(endpoint, credentials)

    if os.path.exists(directory):
        shutil.rmtree(directory)
    os.makedirs(directory)

    tags = trainer.get_tags(project_id=projectid)
    for tag in tags:
        os.makedirs(os.path.join(directory, tag.name))

    skip = 0
    images = trainer.get_tagged_images(project_id=projectid,
                                       take=50,
                                       skip=skip)
    while (len(images) > 0):
        for img in images:
            new_image = os.path.join(directory, img.tags[0].tag_name,
                                     f'{str(uuid.uuid4()).lower()}.png')
            print(f'\rdownloading {img.original_image_uri} to {new_image}',
                  end="")
            request.urlretrieve(url=img.original_image_uri, filename=new_image)
        skip += 50
        images = trainer.get_tagged_images(project_id=projectid,
                                           take=50,
                                           skip=skip)
    print('\nDone!')
Example #12
class LabelUtility:
    """ Utility for interacting with the Custom Vision label tool.
    """
    def __init__(self, ws_name, project_id, project_key):
        endpoint_url = "https://{}.cognitiveservices.azure.com/".format(
            ws_name)
        self.project_id = project_id
        self.client = CustomVisionTrainingClient(project_key,
                                                 endpoint=endpoint_url)
        self.project = self.client.get_project(project_id=project_id)
        self.tags = self.client.get_tags(project_id=project_id)

    def upload_directory(self,
                         data_dir,
                         img_ext="*.jpg",
                         img_dir="images",
                         lbl_file="labels.csv",
                         default_tag_name="important"):
        """
        upload_directory - Upload images from a given directory into the CV workspace

        :param str data_dir: Source folder of the files.
        :param str img_ext: image extension.
        :param str img_dir: image folder.
        :param str lbl_file: labels file.
        :param str default_tag_name: default tag name.

        :returns: None

        """
        label_fn = os.path.join(data_dir, lbl_file)
        img_folder = os.path.join(data_dir, img_dir)

        # Check if required folders exist.
        if not (os.path.isdir(img_folder) and os.path.exists(label_fn)):
            print("Input data not found")
            return

        # Read labels and image list.
        labels_df = pd.read_csv(os.path.join(label_fn))
        image_list = glob.glob(os.path.join(img_folder, img_ext))

        # Upload each image with regions
        for _, path in enumerate(image_list):
            tagged_images_with_regions = []
            regions = []

            file_name = path.split("\\")[-1]
            img = Image.open(path)
            img_width, img_height = img.size

            for _, row in labels_df[labels_df.FileName ==
                                    file_name].iterrows():
                x, y, w, h = row.XMin, row.YMin, row.XMax - row.XMin, row.YMax - row.YMin
                x = x / img_width
                w = w / img_width
                y = y / img_height
                h = h / img_height

                if "DefectType" in row:
                    default_tag_name = row.DefectType

                tag = None
                for a_tag in self.tags:
                    if a_tag.name == default_tag_name:
                        tag = a_tag

                if not tag:
                    tag = self.client.create_tag(self.project_id,
                                                 default_tag_name)
                    self.tags = self.client.get_tags(self.project_id)

                regions.append(
                    Region(tag_id=tag.id, left=x, top=y, width=w, height=h))

            with open(path, mode="rb") as image_contents:
                tagged_images_with_regions.append(
                    ImageFileCreateEntry(name=file_name,
                                         contents=image_contents.read(),
                                         regions=regions))

            upload_result = self.client.create_images_from_files(
                self.project.id, images=tagged_images_with_regions)
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)

    def export_images(self, data_dir, img_dir="images", lbl_file="labels.csv"):
        """
        export_images - Export any tagged images that may exist
        and preserve their tags and regions.

        :param str data_dir: Output folder.
        :param str img_ext: image extension.
        :param str img_dir: image folder.
        :param str lbl_file: labels file.

        :returns: None

        """
        img_folder = os.path.join(data_dir, img_dir)
        # Check if required folders exist.
        if not os.path.isdir(img_folder):
            print("Output folder not found")
            return
        count = self.client.get_tagged_image_count(self.project_id)
        print("Found: ", count, " tagged images.")
        exported, idx = 0, 0
        data = []
        while count > 0:
            count_to_export = min(count, 256)
            print("Getting", count_to_export, "images")
            images = self.client.get_tagged_images(self.project_id,
                                                   take=count_to_export,
                                                   skip=exported)
            for image in images:
                file_name = f'file_{idx}.jpg'
                img_fname = os.path.join(img_folder, file_name)
                data += self.download_image(image, img_fname)
                idx += 1

            exported += count_to_export
            count -= count_to_export
        df = pd.DataFrame(data,
                          columns=[
                              "image_name", "DefectName", "xmin", "xmax",
                              "ymin", "ymax"
                          ])
        classes = sorted(list(set(df['DefectName'])))
        class_ids = {}
        f = open(os.path.join(data_dir, 'label_map.pbtxt'), "w+")
        for i, clas in enumerate(classes):
            class_ids[clas] = i + 1
            f.write('item {\n')
            f.write('\tid: ' + str(i + 1) + '\n')
            f.write('\tname: \'' + clas + '\'\n')
            f.write('}\n')
            f.write('\n')
        f.close()
        df['classid'] = [
            class_ids[the_defect] for the_defect in df['DefectName']
        ]
        df.to_csv(os.path.join(data_dir, lbl_file), index=False)

    @staticmethod
    def download_image(image, img_fname):
        """
        download_image - Export an image.

        :param pyImg3 image: Image object.
        :param str img_fname: Filename of the image.
        :returns: None

        """
        regions = []
        if hasattr(image, "regions"):
            regions = image.regions
        url = image.original_image_uri
        width = image.width
        height = image.height

        # Download the image
        responseFromURL = req.get(url).content
        with open(img_fname, 'wb') as f:
            f.write(responseFromURL)

        # Format the regions
        data = []
        for r in regions:
            left, top, wide, high = r.left, r.top, r.width, r.height
            left = left * width
            top = top * height
            wide = wide * width
            high = high * height
            data.append([
                img_fname.split("\\")[-1], r.tag_name,
                int(left),
                int(left + wide),
                int(top),
                int(top + high)
            ])
        return data
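A minimal usage sketch for the LabelUtility class above; the workspace name, project id, key, and folder names are placeholders, not values from the original example:

util = LabelUtility(ws_name="<workspace-name>", project_id="<project-id>", project_key="<training-key>")
# Upload images listed in data/labels.csv along with their bounding-box regions
util.upload_directory("data", img_ext="*.jpg", img_dir="images", lbl_file="labels.csv")
# Export all tagged images (plus labels.csv and label_map.pbtxt) into the export folder
util.export_images("export", img_dir="images", lbl_file="labels.csv")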
Example #13
            project_id = project.id
        except HttpOperationError as err:
            sys.exit(
                "It seems that you have the project already. (or it can be other error.)"
            )

        # Make tags in the new project
        all_tags = [
            trainer.create_tag(project.id, "Apple"),
            trainer.create_tag(project.id, "Banana"),
            trainer.create_tag(project.id, "Mango"),
            trainer.create_tag(project.id, "Pear"),
            trainer.create_tag(project.id, "Pomelo")
        ]
    else:
        all_tags = trainer.get_tags(project_id)

    with open('../id.json', 'r') as f:
        index_to_class = json.load(f)
    tag_name_to_id = {t.name.lower(): t.id for t in all_tags}
    tag_index_to_id = {
        index: tag_name_to_id[v.lower()]
        for index, v in index_to_class.items()
    }

    with open(IMAGES_PATH + "/anno.json") as data_file:
        data = json.load(data_file)

    # DIMENSION = (600, 800)

    # # transform to normalized coordinate
Example #14
    def trainModel(self, database, modelID, parameters, onMessage, onFinished):
        onMessage("Trainer fetching model settings.")
        session = database.cursor()
        session.execute(
            "SELECT remote_id, training_data, extra_info FROM " +
            self._datatableName + " WHERE id = %s", (modelID, ))
        result = session.fetchone()
        session.close()

        if result:
            projectID, trainingData, _ = result

            onMessage("Training starting...")

            onMessage("Retrieving model...")
            trainer = CustomVisionTrainingClient(self._trainingKey,
                                                 endpoint=self._endPoint)
            project = trainer.get_project(projectID)

            onMessage("Downloading/Caching and Analyzing training data...")

            imageList = []
            dataClassList = {}

            try:
                start = time.time()
                # retrieve information of created tags
                createdTags = trainer.get_tags(projectID)
                for tag in createdTags:
                    dataClassList[tag.name] = tag

                imageOK = 0
                imageFailed = 0
                imageTotal = len(trainingData)

                def visualizeImageDownload():
                    return "(" + str(imageOK) + "/" + str(
                        imageFailed) + "/" + str(imageTotal) + ")"

                for photoID in trainingData:
                    image, _, err = self._serverAPI.getResource(
                        database, photoID)

                    if err:
                        imageFailed += 1
                        onMessage("Failed to download image " + str(photoID) +
                                  ". Error: " + err + " " +
                                  visualizeImageDownload())
                    else:
                        imageOK += 1

                        classOfData = str(trainingData[photoID])

                        # create tag if not exists
                        if classOfData not in dataClassList:
                            dataClassList[classOfData] = trainer.create_tag(
                                project.id, classOfData)

                        isOK, encodedImage = cv2.imencode('.png', image)
                        imageList.append(
                            ImageFileCreateEntry(
                                name=str(photoID) + ".png",
                                contents=encodedImage,
                                tag_ids=[dataClassList[classOfData].id]))

                        onMessage(visualizeImageDownload())
                end = time.time()
                onMessage("Image caching done. Used: " + str(end - start))

                start = time.time()
                for i in range(0, len(imageList), 64):
                    batch = imageList[i:i + 64]
                    upload_result = trainer.create_images_from_files(
                        project.id, images=batch)

                    if not upload_result.is_batch_successful:
                        onMessage("Image batch upload failed.")

                        for image in upload_result.images:
                            onMessage("Image status: ", image.status)

                        onFinished(False)
                        return
                end = time.time()
                onMessage("Image upload done. Used: " + str(end - start))

                onMessage("Training model with " + str(imageOK) + " photos...")

                iteration = trainer.train_project(project.id)
                while (iteration.status != "Completed"):
                    iteration = trainer.get_iteration(project.id, iteration.id)
                    onMessage("Training status: " + iteration.status)
                    time.sleep(3)

                # The iteration is now trained. Publish it to the project endpoint
                trainer.publish_iteration(project.id, iteration.id, projectID,
                                          self._resourceID)
                onMessage("Training done.")
                onFinished(True)

            except Exception as err:
                onMessage("Failed to train.")
                onMessage("Error Message: " + str(err))
                onFinished(False)
        else:
            onMessage("The trainer can't recognize the given model any more.")
            onFinished(False)
Example #15
# Create the credentials object from the training API key
training_credentials = ApiKeyCredentials(
    in_headers={
        "Training-key": "API key"
        }
    )

# Instantiate the client with the endpoint URL to call and the credentials object
trainer = CustomVisionTrainingClient(
    "Endpoint URL",
    training_credentials
    )

# Read the project ID from the console, open the project, then fetch and print the tag names and tag IDs registered in the project
# Note 1: tag IDs cannot be looked up on the Custom Vision project site, so they are retrieved through the API
project_ID = input("Enter the project ID >> ")
print("==== Tags registered in the project ====")
for target_tag in trainer.get_tags(project_ID):
    print("Tag_ID: " + target_tag.id + ", Tag_Name: " + target_tag.name)
print("=========================================")

# Declare the list that will hold the images to register
image_list = []

# Build the tagged image entry to register
# Note 2: to register several files at once, just append more entries to image_list
# Note 3: multiple tag IDs can also be attached (only one is set here to keep the code simple)
image_list.append(
    ImageFileCreateEntry(
        name=input("Enter the file name of the image to register in Custom Vision >> "),
        contents=open(input("Enter the file path of the image to register in Custom Vision >> "), mode="rb").read(),
        tag_ids=[input("Enter the ID of the tag to attach to the image >> ")]
    )
)
Example #16
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info(
        'ML Professoar HTTP trigger function AddLabeledDataClient processed a request.'
    )

    dataBlobUrl = req.params.get('dataBlobUrl')
    if not dataBlobUrl:
        return func.HttpResponse(
            "Please pass a URL to a blob containing the image to be added to training in this request on the query string.",
            status_code=400)

    if dataBlobUrl:
        ImageLabelsJson = req.params.get('imageLabels')
        if not ImageLabelsJson:
            try:
                ImageLabelsJson = req.get_json()
            except:
                return func.HttpResponse(
                    "Please pass JSON containing the labels associated with this image on the query string or in the request body.",
                    status_code=400)

        if ImageLabelsJson:
            # https://pypi.org/project/custom_vision_client/
            # https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/blob/master/samples/vision/custom_vision_training_samples.py

            Labels = []
            CountOfTagsAppliedToImage = 0
            Endpoint = os.environ['clientEndpoint']

            # Get Cognitive Services Environment Variables
            ProjectID = os.environ["projectID"]
            TrainingKey = os.environ['trainingKey']

            # strip out list of Image Labels passed in request
            ImageLabels = json.loads(ImageLabelsJson)

            # retrieve tags from project and loop through them to find tag ids for tags that need to be applied to the image
            Trainer = CustomVisionTrainingClient(TrainingKey,
                                                 endpoint=Endpoint)
            Tags = Trainer.get_tags(ProjectID)
            for ImageLabel in ImageLabels:
                for Tag in Tags:
                    if Tag.name == ImageLabel:
                        Labels.append(Tag.id)
                        CountOfTagsAppliedToImage = CountOfTagsAppliedToImage + 1
                        break

            # create the image from a url and attach the appropriate tags as labels.
            upload_result = Trainer.create_images_from_urls(
                ProjectID,
                [ImageUrlCreateEntry(url=dataBlobUrl, tag_ids=Labels)])
            if upload_result.is_batch_successful:
                return func.HttpResponse(
                    str(CountOfTagsAppliedToImage) +
                    " Tag(s) applied to image at url: " + dataBlobUrl)
            else:
                return func.HttpResponse("Upload of " + dataBlobUrl +
                                         " failed.")

    else:
        return func.HttpResponse(
            "Please pass a URL to a blob file containing the image to be added to training in this request on the query string.",
            status_code=400)
Example #17
class CustomVisionBlobUploader(object):
    
    _BATCH_SIZE = 64
    _WORKER_CONCURRENCY = multiprocessing.cpu_count() * 8

    def __init__(
        self,
        storage_acct_name, storage_acct_key, storage_container_name, storage_prefix,
        cv_endpoint, cv_projectid, cv_training_key, tags
    ):
        # Configure Blob Storage Instance
        self._storage_acct_name = storage_acct_name
        self._storage_acct_key = storage_acct_key
        self._storage_container_name = storage_container_name
        self._block_blob_service = BlockBlobService(account_name=storage_acct_name, account_key=storage_acct_key)

        # Configure Custom Vision Instance
        self._cv_endpoint = "https://" + str(cv_endpoint) + ".api.cognitive.microsoft.com"
        self._cv_projectid = cv_projectid
        self._cv_training_key = cv_training_key
        self.trainer = CustomVisionTrainingClient(cv_training_key, endpoint=self._cv_endpoint)
        self.project = self.trainer.get_project(cv_projectid)

        # Handle tags that may not exist, get tags from project
        cv_tag_names = [tag.name for tag in self.trainer.get_tags(cv_projectid)]
        for tag in tags:
            if tag not in cv_tag_names:
                self.trainer.create_tag(cv_projectid, tag, description=tag, type="Regular")
        self.tags_dict = {tag.name:tag for tag in self.trainer.get_tags(cv_projectid)}

    def start_timer(self):
        self.START_TIME = default_timer()

    def load_blob_batches(self, prefix):
        generator = self._block_blob_service.list_blobs(self._storage_container_name, prefix=prefix)
        blob_list = [blob.name for blob in generator]
        batches = [blob_list[i * self._BATCH_SIZE:(i + 1) * self._BATCH_SIZE] for i in range((len(blob_list) + self._BATCH_SIZE - 1) // self._BATCH_SIZE )]
        self.batches = batches

    def get_blob_filename(self,blob_path):
        return blob_path.split('/')[-1]

    def get_blob(self, bbs, blob_name):
        blob = bbs.get_blob_to_bytes(self._storage_container_name, blob_name)
        # print elapsed time
        elapsed = default_timer() - self.START_TIME
        time_completed_at = "{:5.2f}s".format(elapsed)
        print("{0:<30} {1:>20}".format(blob_name, time_completed_at))
        return blob

    async def get_blob_asynchronous(self,batch_input,batch_output_list, tags):
        print("{0:<30} {1:>20}".format("File", "Completed at"))
        with ThreadPoolExecutor(max_workers=self._WORKER_CONCURRENCY) as executor:

            loop = asyncio.get_event_loop()
            # self.START_TIME = default_timer()
            tasks = [
                loop.run_in_executor(
                    executor,
                    self.get_blob,
                    *(self._block_blob_service, blob)
                )
                for blob in batch_input
            ]
            for response in await asyncio.gather(*tasks):
                customvision_image = ImageFileCreateEntry(name=self.get_blob_filename(response.name),contents=response.content,tag_ids=[self.tags_dict.get(tag).id for tag in tags])
                batch_output_list.append(customvision_image)
Example #18
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-p", "--project", action="store", type=str, help="Project ID", dest="project_id", default=None)
    arg_parser.add_argument("-k", "--key", action="store", type=str, help="Training-Key", dest="training_key", default=None)
    arg_parser.add_argument("-e", "--endpoint", action="store", type=str, help="Endoint", dest="endpoint", default="https://southcentralus.api.cognitive.microsoft.com")
    arg_parser.add_argument("-d", "--dir", action="store", type=str, help="Target directory", dest="dir", default=".\\")
    args = arg_parser.parse_args()

    if (not args.project_id or not args.training_key):
        arg_parser.print_help()
        exit(-1)

    print ("Collecting information for source project:", args.project_id)

    # Create the client
    trainer = CustomVisionTrainingClient(args.training_key, endpoint=args.endpoint)

    # Get the project
    project = trainer.get_project(args.project_id)
    print ("Downloading project:", project.name)
    print ("\tDescription: ", project.description)
    print ("\tDomain: ", project.settings.domain_id)

    # get tags
    tags = trainer.get_tags(project.id)
    print ("Found:", len(tags), "tags.")
    with open("tags.txt", "wt") as tags_file:
        for tag in tags:
            print(tag.name, tag.id, tag.type, file = tags_file)

    download_images(trainer, project.id, args.dir)
Example #19
# In[9]:


for project in dest_trainer.get_projects():
    print(project.name)
    print(project.id)
    print(project.description)


# ## Get the tags on origin project and create same tags on destination project

# In[10]:


dest_tags = []
for tag in trainer.get_tags(Project.id):
    dest_tags.append(dest_trainer.create_tag(dest_Project.id, tag.name))
    print(tag.name)


# In[11]:


dest_tags_dict = {}
dest_tag_ids = []
for tag in dest_tags:
    dest_tags_dict[tag.name] = tag.id
    dest_tag_ids.append(tag.id)

print(dest_tags_dict)
print(dest_tag_ids)
Example #20
class AzureCVObjectDetectionAPI(object):
    """
     A wrapper class that simplifies the use of Azure Custom Vision object detection.
    """
    def __init__(self, endpoint, key, resource_id, project_id=None):
        """ 
        Class constructor; takes the identifiers and key from Azure Custom Vision. The key
        is used both for training and prediction.
        
        Args:
        ----
        endpoint: str
        key: str
        resource_id: str
        project_id: str
        """

        training_credentials = ApiKeyCredentials(
            in_headers={"Training-key": key})
        prediction_credentials = ApiKeyCredentials(
            in_headers={"Prediction-key": key})

        self.trainer = CustomVisionTrainingClient(endpoint,
                                                  training_credentials)
        self.predictor = CustomVisionPredictionClient(endpoint,
                                                      prediction_credentials)
        self.project_id = project_id
        self.tags = {}

        if project_id is not None:
            for t in self.trainer.get_tags(project_id):
                self.tags[t.name] = t.id

        return

    def create_project(self, project_name):
        """
        Create an object detection project named project_name. Switch to this project
        when creation is complete.

        Args:
        ----
        project_name: str
        """
        # Find the object detection domain
        obj_detection_domain = next(
            domain for domain in self.trainer.get_domains()
            if domain.type == "ObjectDetection" and domain.name == "General")

        # Create a new project
        print("Creating project...")
        project = self.trainer.create_project(
            project_name, domain_id=obj_detection_domain.id)
        self.project_id = project.id

        return

    def create_tag(self, tag_name):
        """
        Create a tag at the current object detection project.

        Args:
        ----
        tag_name: str
        """
        assert (self.project_id is not None)
        tag = self.trainer.create_tag(self.project_id, tag_name)
        self.tags[tag.name] = tag.id

        return

    def _upload_one_batch_training_images(self, tagged_images_with_regions):
        """
        Upload one batch (maximum 64) training images to Azure Custom Vision Object Detection.
        Only for internal use with in this class.
        
        Args:
        ----
        tagged_images_with_regions: list of ImageFileCreateEntry 
        
        """

        upload_result = self.trainer.create_images_from_files(
            self.project_id,
            ImageFileCreateBatch(images=tagged_images_with_regions))

        if not upload_result.is_batch_successful:
            print("Image batch upload failed.")
            for image in upload_result.images:
                print("Image status: ", image.status)

        return

    def upload_training_images(self, training_labeled_images):
        """
        Upload training images to Azure Custom Vision Object Detection.
        
        Args:
        ----
        training_labeled_images: list of labeledImage
        """
        assert (self.project_id is not None)

        print("Adding images...")
        tagged_images_with_regions = []
        batch = 0

        for i in range(len(training_labeled_images)):
            if i > 0 and (i % 64) == 0:
                batch += 1
                print("Adding images: batch ", batch)
                self._upload_one_batch_training_images(
                    tagged_images_with_regions)
                tagged_images_with_regions = []

            # accumulating labels within one batch
            labeled_img = training_labeled_images[i]

            # collect regions for every tag of this image
            regions = []
            for t, labels in labeled_img.labels.items():

                if t not in self.tags.keys():
                    self.create_tag(t)

                tag_id = self.tags[t]

                for m in labels:
                    x, y, w, h = normalize_coordinates(m, labeled_img.shape)
                    regions.append(
                        Region(tag_id=tag_id, left=x, top=y, width=w,
                               height=h))

            with open(labeled_img.path, mode="rb") as image_contents:
                tagged_images_with_regions.append(
                    ImageFileCreateEntry(name=labeled_img.name,
                                         contents=image_contents.read(),
                                         regions=regions))

        batch += 1
        if len(tagged_images_with_regions) > 0:
            print("Adding images: batch ", batch)
            self._upload_one_batch_training_images(tagged_images_with_regions)

        return
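A minimal usage sketch for the wrapper class above; the endpoint, key, and resource id are placeholders, not values from the original example:

api = AzureCVObjectDetectionAPI(
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    key="<training-and-prediction-key>",
    resource_id="<azure-resource-id>",
    project_id=None)
api.create_project("my-object-detection-project")  # creates the project and stores its id
api.create_tag("defect")                           # registers a tag and caches its id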
Example #21
def main(req: func.HttpRequest) -> func.HttpResponse:
    logging.info(
        'MLP Object Detection HTTP trigger function AddLabeledData processed a request.'
    )

    try:
        image_url = req.form.get('ImageUrl')
        image_labeling_json = req.form.get('DataLabels')
    except:
        return func.HttpResponse(
            "Please pass JSON containing the labeled regions associated with this image on the query string or in the request body.",
            status_code=400)

    if image_url and image_labeling_json:
        labels = []
        labeled_images_with_regions = []
        count_of_regions_applied_to_image = 0
        count_of_labels_applied_to_region = 0

        endpoint = os.environ['ClientEndpoint']

        # Get Cognitive Services Environment Variables
        project_id = os.environ["ProjectID"]
        training_key = os.environ['TrainingKey']

        # load labeled image regions passed in request into dictionary
        image_labeling_data = json.loads(image_labeling_json)

        # instantiate custom vision client
        trainer = CustomVisionTrainingClient(training_key, endpoint=endpoint)

        # get list of valid tags for this model
        tags = trainer.get_tags(project_id)

        # get the height and width of the image
        image_width = image_labeling_data['asset']['size']['width']
        image_height = image_labeling_data['asset']['size']['height']

        # for each labeled region in this asset, map the labels to tag ids and convert the boundary
        # format from VoTT to Azure Cognitive Services, then upload to the Cognitive Services project.
        regions = []
        for labeled_region in image_labeling_data['regions']:
            for label in labeled_region['tags']:
                for tag in tags:
                    if tag.name == label:
                        labels.append(tag.id)
                        count_of_labels_applied_to_region = count_of_labels_applied_to_region + 1
                        break
            if count_of_labels_applied_to_region > 0:
                count_of_regions_applied_to_image = count_of_regions_applied_to_image + 1
            else:
                return func.HttpResponse(
                    "This Azure Cognitive Services Object Detection project does not contain any labeling tags.  Please add tags to the project before attempting to add labeled data into the project.",
                    status_code=400)

            top_left_x = labeled_region['points'][0]['x']
            top_left_y = labeled_region['points'][0]['y']
            top_right_x = labeled_region['points'][1]['x']
            top_right_y = labeled_region['points'][1]['y']
            bottom_right_x = labeled_region['points'][2]['x']
            bottom_right_y = labeled_region['points'][2]['y']
            bottom_left_x = labeled_region['points'][3]['x']
            bottom_left_y = labeled_region['points'][3]['y']

            # calculate normalized coordinates.
            normalized_left = top_left_x / image_width
            normalized_top = top_left_y / image_height
            normalized_width = (top_right_x - top_left_x) / image_width
            normalized_height = (bottom_left_y - top_left_y) / image_height

            # use the tag id matched for this region (the most recently appended label)
            regions.append(
                Region(tag_id=labels[-1],
                       left=normalized_left,
                       top=normalized_top,
                       width=normalized_width,
                       height=normalized_height))

        labeled_images_with_regions.append(
            ImageUrlCreateEntry(url=image_url, regions=regions))
        upload_result = trainer.create_images_from_urls(
            project_id, images=labeled_images_with_regions)

        result = ""
        if upload_result.is_batch_successful:
            for image in upload_result.images:
                result = result + "Image " + image.source_url + " Status: " + image.status + ", "
            return func.HttpResponse(
                "Images successfully uploaded with " +
                str(count_of_regions_applied_to_image) + " regions and " +
                str(count_of_labels_applied_to_region) +
                " label(s) to project " + project_id + "with result: " +
                result,
                status_code=200)
        else:
            success = True
            for image in upload_result.images:
                result = result + "Image " + image.source_url + " Status: " + image.status + ", "
                if not "ok" in image.status.lower():
                    success = False

            if success:
                return func.HttpResponse(
                    "Image batch upload succeeded with result: " + result,
                    status_code=200)
            else:
                return func.HttpResponse(
                    "Image batch upload failed with result: " + result,
                    status_code=400)

    else:
        return func.HttpResponse(
            "Please pass valid a valid image url and labels in the request body using the parameter names: ImageUrl and DataLabels.",
            status_code=400)
Example #22
class UploadDataset:
    def __init__(self, files_to_upload: list, project_name: str) -> None:
        self.files_to_upload = files_to_upload

        credentials = ApiKeyCredentials(
            in_headers={"Training-key": TRAINING_KEY})
        self.trainer = CustomVisionTrainingClient(TRAINING_ENDPOINT,
                                                  credentials)

        self.project_name = project_name

        self.max_byte_size = 4000000

        self.project: Project = self._connect_to_or_create_project(
            project_name=self.project_name)
        # Make two tags in the new project
        self.green_car_seal_tag = self._get_or_create_tag("green_car_seal")
        self.red_car_seal_tag = self._get_or_create_tag("red_car_seal")
        self.label_to_tag_id = {
            0: self.red_car_seal_tag.id,
            1: self.green_car_seal_tag.id,
        }

    def _connect_to_or_create_project(self, project_name: str) -> Project:
        projects = self.trainer.get_projects()
        project_id = next((p.id for p in projects if p.name == project_name),
                          None)

        if project_id is not None:
            print("Connecting to existing project...")
            return self.trainer.get_project(project_id)

        print("Creating new project...")
        obj_detection_domain = next(
            domain for domain in self.trainer.get_domains()
            if domain.type == "ObjectDetection" and domain.name == "General")
        return self.trainer.create_project(project_name,
                                           domain_id=obj_detection_domain.id)

    def _get_or_create_tag(self, tag_name) -> Tag:
        tags = self.trainer.get_tags(self.project.id)
        for tag in tags:
            if tag.name == tag_name:
                return self.trainer.get_tag(self.project.id, tag.id)

        return self.trainer.create_tag(self.project.id, tag_name)

    def _read_annotation_file(self, annotation_path: str) -> list:
        annotations = []
        with open(annotation_path, "r") as f:

            for line in f:
                line = line.strip()
                parameter_list = line.split(" ")
                label = int(parameter_list[0])
                x, y, w, h = list(map(float, parameter_list[1:]))

                left = x - w / 2
                if left < 0:  # Accounting for previous rounding error
                    left = 0
                top = y - h / 2
                if top < 0:  # Accounting for previous rounding error
                    top = 0

                if left + w > 1:  # Accounting for previous rounding error
                    w = 1 - left
                if top + h > 1:  # Accounting for previous rounding error
                    h = 1 - top

                try:
                    tag_id = self.label_to_tag_id[label]
                except KeyError:
                    raise ValueError(
                        f"Wrong label {label} at {annotation_path}")

                annotations.append(
                    Region(
                        tag_id=tag_id,
                        left=left,
                        top=top,
                        width=w,
                        height=h,
                    ))
        return annotations

    def main(self) -> None:
        dataset_path = os.path.join(os.path.dirname(__file__),
                                    "../../../dataset")

        existing_image_count = self.trainer.get_image_count(
            project_id=self.project.id)
        file_number = existing_image_count
        self.files_to_upload = self.files_to_upload[file_number:]

        for file_name in self.files_to_upload:
            tagged_images_with_regions = []

            annotations: list = self._read_annotation_file(
                annotation_path=os.path.join(dataset_path, "annotations",
                                             file_name + ".txt"), )
            image_bytes: bytes = read_and_resize_image(
                image_path=os.path.join(dataset_path, "images",
                                        file_name + ".JPG"),
                max_byte_size=self.max_byte_size,
            )
            print(f"Image {file_name} is {len(image_bytes)} bytes")
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_bytes,
                                     regions=annotations))
            print("Upload images...")
            upload_result = self.trainer.create_images_from_files(
                self.project.id,
                ImageFileCreateBatch(images=tagged_images_with_regions))
            if not upload_result.is_batch_successful:
                print("Image batch upload failed.")
                for image in upload_result.images:
                    print("Image status: ", image.status)
                exit(-1)
            print(
                f"Uploaded file number {file_number+1} of {len(self.files_to_upload)}"
            )
            file_number += 1
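A minimal usage sketch for the UploadDataset class above; TRAINING_KEY, TRAINING_ENDPOINT, and the read_and_resize_image helper are assumed to be configured elsewhere, and the file names below are placeholders for the dataset's image/annotation base names:

uploader = UploadDataset(files_to_upload=["IMG_0001", "IMG_0002"], project_name="car-seal-detection")
uploader.main()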
Example #23
class Classifier:
    """
        Class for interacting with Custom Vision. Contains three key methods:
            - predict_image() / predicts an image
            - upload_images() / reads image URLs from Blob Storage and uploads to Custom Vision
            - train() / trains a model
    """
    def __init__(self) -> None:
        """
            Reads configuration file
            Initializes connection to Azure Custom Vision predictor and training resources.

            Parameters:
            blob_service_client: Azure Blob Service interaction client

            Returns:
            None
        """
        self.ENDPOINT = Keys.get("CV_ENDPOINT")
        self.project_id = Keys.get("CV_PROJECT_ID")
        self.prediction_key = Keys.get("CV_PREDICTION_KEY")
        self.training_key = Keys.get("CV_TRAINING_KEY")
        self.base_img_url = Keys.get("BASE_BLOB_URL")
        self.prediction_resource_id = Keys.get("CV_PREDICTION_RESOURCE_ID")

        self.prediction_credentials = ApiKeyCredentials(
            in_headers={"Prediction-key": self.prediction_key})
        self.predictor = CustomVisionPredictionClient(
            self.ENDPOINT, self.prediction_credentials)
        self.training_credentials = ApiKeyCredentials(
            in_headers={"Training-key": self.training_key})
        self.trainer = CustomVisionTrainingClient(self.ENDPOINT,
                                                  self.training_credentials)
        connect_str = Keys.get("BLOB_CONNECTION_STRING")
        self.blob_service_client = BlobServiceClient.from_connection_string(
            connect_str)
        try:
            # get all project iterations
            iterations = self.trainer.get_iterations(self.project_id)
            # find published iterations
            published_iterations = [
                iteration for iteration in iterations
                if iteration.publish_name is not None
            ]
            # get the latest published iteration
            published_iterations.sort(key=lambda i: i.created)
            self.iteration_name = published_iterations[-1].publish_name

            with api.app.app_context():
                models.update_iteration_name(self.iteration_name)
        except Exception as e:
            logging.info(e)
            self.iteration_name = "iteration1"

    def predict_image_url(self, img_url: str) -> Dict[str, float]:
        """
            Predicts label(s) of Image read from URL.

            Parameters:
            img_url: Image URL

            Returns:
            (prediction (dict[str,float]): labels and associated probabilities,
            best_guess: (str): name of the label with highest probability)
        """
        with api.app.app_context():
            self.iteration_name = models.get_iteration_name()
        res = self.predictor.classify_image_url(self.project_id,
                                                self.iteration_name, img_url)
        pred_kv = dict([(i.tag_name, i.probability) for i in res.predictions])
        best_guess = max(pred_kv, key=pred_kv.get)

        return pred_kv, best_guess

    def predict_image(self, img) -> Dict[str, float]:
        """
            Predicts label(s) of an image file.
            ASSUMES:
            -image of type .png
            -image size less than 4MB
            -image resolution at least 256x256 pixels

            Parameters:
            img: .png file

            Returns:
            (prediction (dict[str,float]): labels and associated probabilities,
            best_guess: (str): name of the label with highest probability)
        """
        with api.app.app_context():
            self.iteration_name = models.get_iteration_name()
        res = self.predictor.classify_image_with_no_store(
            self.project_id, self.iteration_name, img)
        # reset the file head such that it does not affect the state of the file handle
        img.seek(0)
        pred_kv = dict([(i.tag_name, i.probability) for i in res.predictions])
        best_guess = max(pred_kv, key=pred_kv.get)
        return pred_kv, best_guess

    def predict_image_by_post(self, img) -> Dict[str, float]:
        """
            Predicts label(s) of an image file posted directly to the prediction endpoint.
            ASSUMES:
            -image of type .png
            -image size less than 4MB
            -image resolution at least 256x256 pixels

            Parameters:
            img: .png file

            Returns:
            (prediction (dict[str,float]): labels and associated probabilities,
            best_guess: (str): name of the label with highest probability)
        """

        headers = {
            'content-type': 'application/octet-stream',
            "prediction-key": self.prediction_key
        }
        res = requests.post(Keys.get("CV_PREDICTION_ENDPOINT"),
                            img.read(),
                            headers=headers).json()
        img.seek(0)
        pred_kv = dict([(i["tagName"], i["probability"])
                        for i in res["predictions"]])
        best_guess = max(pred_kv, key=pred_kv.get)
        return pred_kv, best_guess

    def __chunks(self, lst, n):
        """
            Helper method used by upload_images() to upload URL chunks of 64, which is maximum chunk size in Azure Custom Vision.
        """
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    def upload_images(self, labels: List, container_name) -> None:
        """
            Takes as input a list of labels, uploads all associated images to the Azure Custom Vision project.
            If label in input already exists in Custom Vision project, all images are uploaded directly.
            If label in input does not exist in Custom Vision project, new label (Tag object in Custom Vision) is created before uploading images

            Parameters:
            labels (str[]): List of labels

            Returns:
            None
        """
        url_list = []
        existing_tags = list(self.trainer.get_tags(self.project_id))

        try:
            container = self.blob_service_client.get_container_client(
                container_name)
        except Exception as e:
            print(
                f"could not find container '{container_name}', error: ",
                str(e),
            )

        for label in labels:
            # check if input has correct type
            if not isinstance(label, str):
                raise Exception("label " + str(label) + " must be a string")

            tag = [t for t in existing_tags if t.name == label]
            # check if tag already exists
            if len(tag) == 0:
                try:
                    tag = self.trainer.create_tag(self.project_id, label)
                    print("Created new label in project: " + label)
                except Exception as e:
                    print(e)
                    continue
            else:
                tag = tag[0]

            blob_prefix = f"{label}/"
            # materialize the paged iterator so the emptiness check below works
            blob_list = list(container.list_blobs(name_starts_with=blob_prefix))

            if not blob_list:
                raise AttributeError("no images for this label")

            # build correct URLs and append to URL list
            for blob in blob_list:
                blob_url = f"{self.base_img_url}/{container_name}/{blob.name}"
                url_list.append(
                    ImageUrlCreateEntry(url=blob_url, tag_ids=[tag.id]))

        # upload URLs in chunks of 64
        print("Uploading images from blob to CV")
        img_f = 0
        img_s = 0
        img_d = 0
        itr_img = 0
        chunks = self.__chunks(url_list, setup.CV_MAX_IMAGES)
        num_imgs = len(url_list)
        error_messages = set()
        for url_chunk in chunks:
            upload_result = self.trainer.create_images_from_urls(
                self.project_id, images=url_chunk)
            if not upload_result.is_batch_successful:
                for image in upload_result.images:
                    if image.status == "OK":
                        img_s += 1
                    elif image.status == "OKDuplicate":
                        img_d += 1
                    else:
                        error_messages.add(image.status)
                        img_f += 1

                    itr_img += 1
            else:
                batch_size = len(upload_result.images)
                img_s += batch_size
                itr_img += batch_size

            prc = itr_img / num_imgs
            print(
                f"\t succesfull: \033[92m {img_s:5d} \033]92m \033[0m",
                f"\t duplicates: \033[33m {img_d:5d} \033]33m \033[0m",
                f"\t failed: \033[91m {img_f:5d} \033]91m \033[0m",
                f"\t [{prc:03.2%}]",
                sep="",
                end="\r",
                flush=True,
            )

        print()
        if len(error_messages) > 0:
            print("Error messages:")
            for error_message in error_messages:
                print(f"\t {error_message}")

    def get_iteration(self):
        iterations = self.trainer.get_iterations(self.project_id)
        iterations.sort(key=(lambda i: i.created))
        newest_iteration = iterations[-1]
        return newest_iteration

    def delete_iteration(self) -> None:
        """
            Deletes the oldest iteration in Custom Vision once the maximum number of iterations is reached.
            The free tier of Custom Vision allows at most 10 iterations per project.
        """
        iterations = self.trainer.get_iterations(self.project_id)
        if len(iterations) >= setup.CV_MAX_ITERATIONS:
            iterations.sort(key=lambda i: i.created)
            oldest_iteration = iterations[0].id
            self.trainer.unpublish_iteration(self.project_id, oldest_iteration)
            self.trainer.delete_iteration(self.project_id, oldest_iteration)

    def train(self, labels: list) -> None:
        """
            Trains the model on all labels specified in the input list; an exception is raised by self.trainer.train_project() if it is asked to train on non-existent labels.
            Generates a unique iteration name, publishes the model and sets self.iteration_name if successful.
            Parameters:
            labels (str[]): List of labels
        """
        try:
            email = Keys.get("EMAIL")
        except Exception:
            print("No email found, setting to empty")
            email = ""

        self.delete_iteration()
        print("Training...")
        iteration = self.trainer.train_project(
            self.project_id,
            reserved_budget_in_hours=1,
            notification_email_address=email,
        )
        # Wait for training to complete
        start = time.time()
        while iteration.status != "Completed":
            iteration = self.trainer.get_iteration(self.project_id,
                                                   iteration.id)
            minutes, seconds = divmod(time.time() - start, 60)
            print(
                f"Training status: {iteration.status}",
                f"\t[{minutes:02.0f}m:{seconds:02.0f}s]",
                end="\r",
            )
            time.sleep(1)

        print()

        # The iteration is now trained. Publish it to the project endpoint
        iteration_name = str(uuid.uuid4())
        self.trainer.publish_iteration(
            self.project_id,
            iteration.id,
            iteration_name,
            self.prediction_resource_id,
        )
        with api.app.app_context():
            self.iteration_name = models.update_iteration_name(iteration_name)

    def delete_all_images(self) -> None:
        """
            Function for deleting uploaded images in Custom Vision.
        """
        try:
            self.trainer.delete_images(self.project_id,
                                       all_images=True,
                                       all_iterations=True)
        except Exception as e:
            raise Exception("Could not delete all images: " + str(e))

    def retrain(self):
        """
            Train model on all labels and update iteration.
        """
        with api.app.app_context():
            labels = models.get_all_labels()

        self.upload_images(labels, setup.CONTAINER_NAME_NEW)
        try:
            self.train(labels)
        except CustomVisionErrorException as e:
            msg = "No changes since last training"
            print(e, "exiting...")
            raise excp.BadRequest(msg)

    def hard_reset_retrain(self):
        """
            Train model on all labels and update iteration.
            This method sleeps for 60 seconds to make sure all
            old images are deleted from custom vision before
            uploading original dataset.
        """
        with api.app.app_context():
            labels = models.get_all_labels()

        # Wait 60 seconds to make sure all images are deleted in custom vision
        time.sleep(60)
        self.upload_images(labels, setup.CONTAINER_NAME_ORIGINAL)
        try:
            self.train(labels)
        except CustomVisionErrorException as e:
            msg = "No changes since last training"
            print(e, "exiting...")
            raise excp.BadRequest(msg)
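# A minimal usage sketch for the class above (not part of the original code).
# The class name `Classifier` and the no-argument constructor are assumptions;
# only the method names and the setup constant come from the source.
classifier = Classifier()
labels = ["rock", "paper", "scissors"]
classifier.upload_images(labels, setup.CONTAINER_NAME_NEW)  # tag and upload blobs
classifier.train(labels)  # train and publish a new iteration
pred, best = classifier.predict_image_url("https://example.com/sample.png")
print(best, pred[best])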
Example #24
0
trainAPI = CustomVisionTrainingClient(training_key, ENDPOINT)

domains = trainAPI.get_domains()
project = trainAPI.get_projects()[0]

modelPath = os.getcwd() + '/model'
dataPath = os.path.join(modelPath, 'dataset.json')
imagesPath = os.path.join(modelPath, 'images')
print(modelPath, dataPath, imagesPath)

file = open(dataPath)
images = json.loads(file.read())
file.close()

print(project)
existingTags = trainAPI.get_tags(project_id=project.id)

# for tag in existingTags:
#     trainAPI.delete_tag(project.id , tag.id)

tag_flag = 0
if len(existingTags) == 0:
    tag_flag = 1

if tag_flag == 1:
    tagstoimport = {}

    for image in images:
        for tag in image['tags']:
            tagstoimport[tag['tagName']] = tag
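
    # Hypothetical continuation (not in the original snippet): create the
    # collected tags in the Custom Vision project, mirroring the create_tag
    # calls used in the other examples in this document.
    created_tags = {}
    for tag_name in tagstoimport:
        created_tags[tag_name] = trainAPI.create_tag(project.id, tag_name)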
Example #25
0
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry, Region
import os

ENDPOINT = ""

# Replace with a valid key
training_key = "<>"

trainer = CustomVisionTrainingClient(training_key, endpoint=ENDPOINT)

project = trainer.get_projects()[0]

balloon_tag = trainer.get_tags(project.id)[0]

balloon_image_regions = {}

for filename in os.listdir("./out/"):
    if filename.endswith(".txt"):
        regions = []

        # read the region file and split each line into its fields
        with open("./out/" + filename, "r") as f:
            for line in f.readlines():
                line = line.replace("\n", "")
                regions.append(line.split(" "))

        jpgName = filename.replace(".txt", ".jpg")
        balloon_image_regions[jpgName] = regions

print(balloon_image_regions)

# Update this with the path to where you downloaded the images.
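# A hedged sketch of how this example presumably continues (the original is cut
# off here): build ImageFileCreateEntry objects with Region bounding boxes and
# upload them in batches of up to 64, the maximum batch size noted earlier in
# this document. The variable name `base_image_location` and the region-file
# format ("class x_center y_center width height", normalized 0-1) are
# assumptions, not taken from the source.
base_image_location = "<path to the downloaded images>"

tagged_images_with_regions = []
for file_name, file_regions in balloon_image_regions.items():
    regions = []
    for r in file_regions:
        x, y, w, h = float(r[1]), float(r[2]), float(r[3]), float(r[4])
        # convert centre-based coordinates to the left/top form Region expects
        regions.append(Region(tag_id=balloon_tag.id,
                              left=x - w / 2, top=y - h / 2,
                              width=w, height=h))
    with open(os.path.join(base_image_location, file_name), "rb") as image_contents:
        tagged_images_with_regions.append(
            ImageFileCreateEntry(name=file_name,
                                 contents=image_contents.read(),
                                 regions=regions))

# upload in chunks of 64 images per request
for i in range(0, len(tagged_images_with_regions), 64):
    batch = tagged_images_with_regions[i:i + 64]
    upload_result = trainer.create_images_from_files(project.id, images=batch)
    if not upload_result.is_batch_successful:
        print("Image batch upload failed.")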