Example #1
def main():
    trainer = training_api.TrainingApi(CUSTOMVISION_TRAINING_KEY)
    predictor = prediction_endpoint.PredictionEndpoint(
        CUSTOMVISION_PREDICTION_KEY)

    # find project
    print('Get project {} info...'.format(PROJECT_NAME))
    project_id = None
    for p in trainer.get_projects():
        if p.name == PROJECT_NAME:
            project_id = p.id
            break

    if project_id is None:
        print('[ERROR] Project {} not found!'.format(PROJECT_NAME))
        return

    print('Predicting...')
    with open('test_image.jpg', 'rb') as image_data:
        results = predictor.predict_image(project_id, image_data.read())

    print('Result:')
    for prediction in results.predictions:
        print("{0}: {1:.2f}%".format(prediction.tag_name,
                                     prediction.probability * 100))
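
# The constants used above (CUSTOMVISION_TRAINING_KEY, CUSTOMVISION_PREDICTION_KEY,
# PROJECT_NAME) and the imports are not shown in this snippet. A minimal sketch of
# how they might be defined, following the environment-variable pattern used in the
# later examples (the variable names and the project name are assumptions):
import os
from azure.cognitiveservices.vision.customvision.training import training_api
from azure.cognitiveservices.vision.customvision.prediction import prediction_endpoint

CUSTOMVISION_TRAINING_KEY = os.environ["CUSTOMVISION_TRAINING_KEY"]
CUSTOMVISION_PREDICTION_KEY = os.environ["CUSTOMVISION_PREDICTION_KEY"]
PROJECT_NAME = "My Sample Project"  # placeholder project name
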
def find_or_train_project():
    try:
        training_key = os.environ[TRAINING_KEY_ENV_NAME]
    except KeyError:
        raise SubscriptionKeyError(
            "You need to set the {} env variable.".format(
                TRAINING_KEY_ENV_NAME))

    # Use the training API to find the SDK sample project created from the training example.
    from custom_vision_training_samples import train_project, SAMPLE_PROJECT_NAME
    trainer = training_api.TrainingApi(training_key)

    for proj in trainer.get_projects():
        if (proj.name == SAMPLE_PROJECT_NAME):
            return proj

    # Or, if not found, we will run the training example to create it.
    return train_project(training_key)
Example #3
def main():
    #show_image(clean_dir,filename)
    clean_dir = "<path to clean images>"      # placeholder: set your local image directories
    crack_dir = "<path to cracked images>"

    training_key = "<training key>"           # placeholder: set your Custom Vision keys
    prediction_key = "<prediction key>"

    trainer = training_api.TrainingApi(training_key)
    
    project = create_project(trainer, "Cracks Recognition")
    
    clean_tag = create_tag(trainer, project, "clean")
    crack_tag = create_tag(trainer, project, "cracked")
    
    upload_images(trainer,project, clean_dir,clean_tag)
    upload_images(trainer,project, crack_dir,crack_tag)
    
    run_training(trainer,project)
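
# The helpers called above (create_project, create_tag, upload_images, run_training)
# are not shown in this snippet. A hedged sketch of what they might look like, built
# only from training_api calls that appear in the other examples on this page (the
# signatures are assumptions):
import os
import time

def create_project(trainer, name):
    # Reuse a project with this name if it already exists, otherwise create it.
    for p in trainer.get_projects():
        if p.name == name:
            return p
    return trainer.create_project(name)

def create_tag(trainer, project, tag_name):
    return trainer.create_tag(project.id, tag_name)

def upload_images(trainer, project, image_dir, tag):
    # Upload every file in image_dir and label it with the given tag.
    for image in os.listdir(image_dir):
        with open(os.path.join(image_dir, image), mode="rb") as img_data:
            trainer.create_images_from_data(project.id, img_data.read(), [tag.id])

def run_training(trainer, project):
    iteration = trainer.train_project(project.id)
    while iteration.status == "Training":
        time.sleep(1)
        iteration = trainer.get_iteration(project.id, iteration.id)
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    return iteration
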
def train_project(subscription_key):

    trainer = training_api.TrainingApi(subscription_key)

    # Create a new project
    print("Creating project...")
    project = trainer.create_project(SAMPLE_PROJECT_NAME,
                                     classification_type=Classifier.multiclass)

    # Make four tags in the new project
    hemlock_tag = trainer.create_tag(project.id, "Hemlock")
    cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")
    pine_needle_tag = trainer.create_tag(project.id, "Pine Needle Leaves")
    flat_leaf_tag = trainer.create_tag(project.id, "Flat Leaves")

    print("Adding images...")
    hemlock_dir = os.path.join(IMAGES_FOLDER, "Hemlock")
    for image in os.listdir(hemlock_dir):
        with open(os.path.join(hemlock_dir, image), mode="rb") as img_data:
            trainer.create_images_from_data(
                project.id, img_data.read(),
                [hemlock_tag.id, pine_needle_tag.id])

    cherry_dir = os.path.join(IMAGES_FOLDER, "Japanese Cherry")
    for image in os.listdir(cherry_dir):
        with open(os.path.join(cherry_dir, image), mode="rb") as img_data:
            trainer.create_images_from_data(project.id, img_data.read(),
                                            [cherry_tag.id, flat_leaf_tag.id])

    print("Training...")
    iteration = trainer.train_project(project.id)
    while (iteration.status == "Training"):
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    print("Done!")
    return project
'''
INSTRUCTION:
Before you run this script:
    1. Set the correct local csv file paths below.
        The two csv files needed here are products of process_data.py.

    2. Adjust the threshold constant below.
'''

import pandas as pd
from azure.cognitiveservices.vision.customvision.training import training_api

# Provide appropriate information
project_id = "8d05ada4-02ee-42f6-9784-9c6aa3457f9d"
training_key = "2fb2e54d14fc4c4e8fb264338277c250"
trainer = training_api.TrainingApi(training_key)

# Set the local csv file location here
testing_car_df_path = r"/Users/Koshin/PycharmProjects/deep_learning/testing_car.csv"
prediction_result_df_path = r"/Users/Koshin/PycharmProjects/deep_learning/prediction_result.csv"

# Decide the threshold percentage - default 75%
CONST_THRESHOLD = 75

### If you are analyzing a custom model, provide the appropriate path to custom_model_result.csv
custom_model_result_df_location = r"/Users/Koshin/PycharmProjects/deep_learning/bumper_damage_result.csv"


def csv_to_dataFrame(local_csv_location):

    with open(local_csv_location) as file:
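        # The example is truncated here; a minimal completion, assuming the helper
        # simply loads the csv file into a pandas DataFrame:
        return pd.read_csv(file)


# Hedged usage sketch, not part of the original script: apply CONST_THRESHOLD to a
# result DataFrame, assuming a hypothetical 'probability' column expressed in percent.
def keep_confident_rows(result_df):
    return result_df[result_df['probability'] >= CONST_THRESHOLD]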
Example #6
from settings import TRAINING_KEY, PREDICTION_KEY

from azure.cognitiveservices.vision.customvision.training import training_api
from azure.cognitiveservices.vision.customvision.training.models import ImageUrlCreateEntry

import time

# Replace with a valid key

trainer = training_api.TrainingApi(TRAINING_KEY)

# Create a new project
print("Creating project...")
project = trainer.create_project("dtademo2")
print("Project ID is {0} name {1}".format(project.id, project.name))

# Make two tags in the new project
hemlock_tag = trainer.create_tag(project.id, "Hemlock")
cherry_tag = trainer.create_tag(project.id, "Japanese Cherry")

base_image_url = "https://raw.githubusercontent.com/Microsoft/Cognitive-CustomVision-Windows/master/Samples/"

print("Adding images...")
for image_num in range(1, 10):
    image_url = base_image_url + "Images/Hemlock/hemlock_{}.jpg".format(
        image_num)
    trainer.create_images_from_urls(
        project.id,
        [ImageUrlCreateEntry(url=image_url, tag_ids=[hemlock_tag.id])])

for image_num in range(1, 10):
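    # The example is truncated here; the body of this loop presumably mirrors the
    # hemlock loop above (a sketch; the cherry image path is an assumption):
    image_url = base_image_url + "Images/Japanese Cherry/japanese_cherry_{}.jpg".format(
        image_num)
    trainer.create_images_from_urls(
        project.id,
        [ImageUrlCreateEntry(url=image_url, tag_ids=[cherry_tag.id])])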
Example #7
def get_trainer():
    training_key = os.environ['TRAINING_KEY']
    return training_api.TrainingApi(training_key)
                            help="Source Training-Key",
                            dest="source_training_key",
                            default=None)
    arg_parser.add_argument("-d",
                            "--dest",
                            action="store",
                            type=str,
                            help="Destination Training-Key",
                            dest="destination_training_key",
                            default=None)
    args = arg_parser.parse_args()

    if (not args.project_id or not args.source_training_key
            or not args.destination_training_key):
        arg_parser.print_help()
        exit(-1)

    print("Collecting information for source project:", args.project_id)

    # Client for Source
    src_trainer = training_api.TrainingApi(args.source_training_key)

    # Client for Destination
    dest_trainer = training_api.TrainingApi(args.destination_training_key)

    destination_project = migrate_project(src_trainer, dest_trainer,
                                          args.project_id)
    tags = migrate_tags(src_trainer, dest_trainer, args.project_id,
                        destination_project.id)
    source_images = migrate_images(src_trainer, dest_trainer, args.project_id,
                                   destination_project.id, tags)
def getExportStatusId(status):
    if (status == 'Failed'):
        return 1
    elif (status == 'Done'):
        return 2
    elif (status == 'Exporting'):
        return 3
    else:
        return 0
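

# trainModel, exportIteration and downloadExportedModel are referenced in the
# __main__ block below but are not shown in this snippet. A hedged sketch of what
# they might look like, reusing the training/iteration calls seen in the other
# examples plus the SDK's export_iteration/get_exports calls; treat the exact
# signatures and the 'TensorFlow' platform choice as assumptions:
import time
import urllib.request


def trainModel(api, projectId):
    iteration = api.train_project(projectId)
    while iteration.status == 'Training':
        time.sleep(1)
        iteration = api.get_iteration(projectId, iteration.id)
    api.update_iteration(projectId, iteration.id, is_default=True)
    return iteration.id


def exportIteration(api, projectId, iterationId):
    api.export_iteration(projectId, iterationId, 'TensorFlow')
    export = api.get_exports(projectId, iterationId)[0]
    while getExportStatusId(export.status) == 3:  # 3 == 'Exporting'
        time.sleep(1)
        export = api.get_exports(projectId, iterationId)[0]
    return export.download_uri


def downloadExportedModel(downloadUri, assetsRelativePath):
    urllib.request.urlretrieve(downloadUri,
                               os.path.join(assetsRelativePath, 'model.zip'))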


if __name__ == '__main__':
    trainingKey = os.getenv('CUSTOM_VISION_SECRET')
    projectId = os.getenv('CUSTOM_VISION_PROJECT_ID')
    assetsRelativePath = os.getenv('ASSETS_RELATIVE_PATH')

    if (trainingKey == None):
        if (len(sys.argv) > 1):
            trainingKey = sys.argv[1]
        else:
            raise Exception('Custom Vision training secret not found in CUSTOM_VISION_SECRET environment variable nor in first command line argument')
    if (projectId == None):
        raise Exception('CUSTOM_VISION_PROJECT_ID environment variable not found')
    if (assetsRelativePath == None):
        raise Exception('ASSETS_RELATIVE_PATH environment variable not found')

    api = training_api.TrainingApi(trainingKey)
    iterationId = trainModel(api, projectId)
    downloadUri = exportIteration(api, projectId, iterationId)
    downloadExportedModel(downloadUri, assetsRelativePath)
Example #10
</html>
  """.format(header, tableHTML)
  try:
    displayHTML(style + body)
  except:
    pass

# COMMAND ----------

from azure.cognitiveservices.vision.customvision.training import training_api
from azure.cognitiveservices.vision.customvision.training.models import ImageUrlCreateEntry

CUSTOM_VISION_TRAINING_KEY = dbutils.widgets.get("custom_vision_training_key")
CUSTOM_VISION_PREDICTION_KEY = dbutils.widgets.get("custom_vision_prediction_key")

trainer = training_api.TrainingApi(CUSTOM_VISION_TRAINING_KEY)
projects = trainer.get_projects()
tag_dict = {}
project = next((project for project in projects if project.name == "ShoeStyleTagger"), None)
if project == None:
  # Create a new project
  project = trainer.create_project("ShoeStyleTagger")
  tag_dict["sandals"] = trainer.create_tag(project.id, "sandals")
  tag_dict["slippers"] = trainer.create_tag(project.id, "slippers")
  tag_dict["sneakers"] = trainer.create_tag(project.id, "sneakers")
  tag_dict["boots"] = trainer.create_tag(project.id, "boots")
else:
  tags = trainer.get_tags(project.id)
  tag_dict = dict(((tag.name, tag) for tag in tags))
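
# The rest of this notebook cell is not shown. A hedged sketch of how tag_dict and
# the ImageUrlCreateEntry import above might be used to add training images by URL,
# mirroring the URL-upload pattern in Example #6 (the shoe_image_urls mapping is a
# hypothetical placeholder):
shoe_image_urls = {
  "sandals": [],   # fill in image URLs for each style
  "slippers": [],
  "sneakers": [],
  "boots": [],
}
for style, urls in shoe_image_urls.items():
  entries = [ImageUrlCreateEntry(url=url, tag_ids=[tag_dict[style].id])
             for url in urls]
  if entries:
    trainer.create_images_from_urls(project.id, entries)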

# COMMAND ----------
Example #11
        (blob.name, blob.properties.last_modified)
        for blob in block_blob_service.list_blobs(label_container_name)
        if re.match(r'tagged_(.*).csv', blob.name)
    ]
    if file_date:
        block_blob_service.get_blob_to_path(
            label_container_name,
            max(file_date, key=lambda x: x[1])[0],
            config_file["tagged_output"])
    else:
        raise ValueError(
            "No tagged data exists. Cannot train model without any tagged data."
        )

    from map_validation import detectortest
    trainer = training_api.TrainingApi(config_file["training_key"])
    predictor = prediction_endpoint.PredictionEndpoint(
        config_file["prediction_key"])
    file_date = [
        (blob.name, blob.properties.last_modified)
        for blob in block_blob_service.list_blobs(label_container_name)
        if re.match(r'test_(.*).csv', blob.name)
    ]
    if file_date:
        block_blob_service.get_blob_to_path(
            label_container_name,
            max(file_date, key=lambda x: x[1])[0], config_file["test_output"])
        train_cv_model(config_file["tagged_output"],
                       trainer,
                       config_file["project_id"],
                       config_file["image_dir"],
Example #12
def main():
    trainer = training_api.TrainingApi(TRAINING_KEY)

    # Create a new project
    print("Creating project...")
    project = None
    projects = trainer.get_projects()
    existing_project = list(filter(lambda t: t.name == PROJECT_NAME, projects))
    if existing_project:
        project = existing_project[0]
    if project is None:
        project = trainer.create_project(PROJECT_NAME)
    print(project.id)

    # Make tags in the new project
    print("Creating tags...")
    tag_list = trainer.get_tags(project.id)
    if len(tag_list.tags) == 0:
        for i in range(10):
            trainer.create_tag(project.id, str(i))
            tag_list = trainer.get_tags(project.id)

    # Import labels
    labels = pd.read_csv('mnist/train-labels.csv', header=None)
    labels.columns = ['filename', 'label']

    # Upload tagged images
    print("Uploading tagged images...")
    tagged_images = trainer.get_tagged_images(project.id)
    if len(tagged_images) == 0:
        for index, row in labels.iterrows():
            # comment the next two rows if you want to upload the whole MNIST dataset
            if index > 1000:
                break
            with open("mnist/" + os.fsdecode(row.filename),
                      mode="rb") as img_data:
                # Lookup tag from label
                tag = list(
                    filter(lambda t: t.name == str(row.label),
                           tag_list.tags))[0]
                buffer = img_data.read()
                response = trainer.create_images_from_data(
                    project.id, buffer, [tag.id])
                print(response)

    # Train
    print("Training...")
    iteration_id = project.current_iteration_id
    try:
        iteration = trainer.train_project(project.id)
        while iteration.status == "Training":
            iteration = trainer.get_iteration(project.id, iteration.id)
            print("Training status: " + iteration.status)
            time.sleep(1)
        iteration_id = iteration.id
    except HttpOperationError as error:
        print(error.response.text)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration_id, is_default=True)
    print("Done!")

    # Make a prediction
    predictor = prediction_endpoint.PredictionEndpoint(PREDICTION_KEY)
    with open("mnist/test-images/128.jpg", mode="rb") as test_data:
        results = predictor.predict_image(project.id, test_data.read(),
                                          iteration_id)

    # Display the results.
    for prediction in results.predictions:
        print("\t" + prediction.tag +
              ": {0:.2f}%".format(prediction.probability * 100))
def train_project(training_key):
    trainer = training_api.TrainingApi(training_key)

    # Find the object detection domain
    obj_detection_domain = next(domain for domain in trainer.get_domains()
                                if domain.type == "ObjectDetection")

    # Create a new project
    print("Creating project...")
    project = trainer.create_project("My Detection Project",
                                     domain_id=obj_detection_domain.id)

    # Make two tags in the new project
    fork_tag = trainer.create_tag(project.id, "fork")
    scissors_tag = trainer.create_tag(project.id, "scissors")

    fork_image_regions = {
        "fork_1": [0.145833328, 0.3509314, 0.5894608, 0.238562092],
        "fork_2": [0.294117659, 0.216944471, 0.534313738, 0.5980392],
        "fork_3": [0.09191177, 0.0682516545, 0.757352948, 0.6143791],
        "fork_4": [0.254901975, 0.185898721, 0.5232843, 0.594771266],
        "fork_5": [0.2365196, 0.128709182, 0.5845588, 0.71405226],
        "fork_6": [0.115196079, 0.133611143, 0.676470637, 0.6993464],
        "fork_7": [0.164215669, 0.31008172, 0.767156839, 0.410130739],
        "fork_8": [0.118872553, 0.318251669, 0.817401946, 0.225490168],
        "fork_9": [0.18259804, 0.2136765, 0.6335784, 0.643790841],
        "fork_10": [0.05269608, 0.282303959, 0.8088235, 0.452614367],
        "fork_11": [0.05759804, 0.0894935, 0.9007353, 0.3251634],
        "fork_12": [0.3345588, 0.07315363, 0.375, 0.9150327],
        "fork_13": [0.269607842, 0.194068655, 0.4093137, 0.6732026],
        "fork_14": [0.143382356, 0.218578458, 0.7977941, 0.295751631],
        "fork_15": [0.19240196, 0.0633497, 0.5710784, 0.8398692],
        "fork_16": [0.140931368, 0.480016381, 0.6838235, 0.240196079],
        "fork_17": [0.305147052, 0.2512582, 0.4791667, 0.5408496],
        "fork_18": [0.234068632, 0.445702642, 0.6127451, 0.344771236],
        "fork_19": [0.219362751, 0.141781077, 0.5919118, 0.6683006],
        "fork_20": [0.180147052, 0.239820287, 0.6887255, 0.235294119]
    }

    scissors_image_regions = {
        "scissors_1": [0.4007353, 0.194068655, 0.259803921, 0.6617647],
        "scissors_2": [0.426470578, 0.185898721, 0.172794119, 0.5539216],
        "scissors_3": [0.289215684, 0.259428144, 0.403186262, 0.421568632],
        "scissors_4": [0.343137264, 0.105833367, 0.332107842, 0.8055556],
        "scissors_5": [0.3125, 0.09766343, 0.435049027, 0.71405226],
        "scissors_6": [0.379901975, 0.24308826, 0.32107842, 0.5718954],
        "scissors_7": [0.341911763, 0.20714055, 0.3137255, 0.6356209],
        "scissors_8": [0.231617644, 0.08459154, 0.504901946, 0.8480392],
        "scissors_9": [0.170343131, 0.332957536, 0.767156839, 0.403594762],
        "scissors_10": [0.204656869, 0.120539248, 0.5245098, 0.743464053],
        "scissors_11": [0.05514706, 0.159754932, 0.799019635, 0.730392158],
        "scissors_12": [0.265931368, 0.169558853, 0.5061275, 0.606209159],
        "scissors_13": [0.241421565, 0.184264734, 0.448529422, 0.6830065],
        "scissors_14": [0.05759804, 0.05027781, 0.75, 0.882352948],
        "scissors_15": [0.191176474, 0.169558853, 0.6936275, 0.6748366],
        "scissors_16": [0.1004902, 0.279036, 0.6911765, 0.477124184],
        "scissors_17": [0.2720588, 0.131977156, 0.4987745, 0.6911765],
        "scissors_18": [0.180147052, 0.112369314, 0.6262255, 0.6666667],
        "scissors_19": [0.333333343, 0.0274019931, 0.443627447, 0.852941155],
        "scissors_20": [0.158088237, 0.04047389, 0.6691176, 0.843137264]
    }

    # Go through the data table above and create the images
    print("Adding images...")
    tagged_images_with_regions = []

    for file_name in fork_image_regions.keys():
        x, y, w, h = fork_image_regions[file_name]
        regions = [
            Region(tag_id=fork_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "fork", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    for file_name in scissors_image_regions.keys():
        x, y, w, h = scissors_image_regions[file_name]
        regions = [
            Region(tag_id=scissors_tag.id, left=x, top=y, width=w, height=h)
        ]

        with open(os.path.join(IMAGES_FOLDER, "scissors", file_name + ".jpg"),
                  mode="rb") as image_contents:
            tagged_images_with_regions.append(
                ImageFileCreateEntry(name=file_name,
                                     contents=image_contents.read(),
                                     regions=regions))

    trainer.create_images_from_files(project.id,
                                     images=tagged_images_with_regions)

    print("Training...")
    iteration = trainer.train_project(project.id)
    while (iteration.status != "Completed"):
        iteration = trainer.get_iteration(project.id, iteration.id)
        print("Training status: " + iteration.status)
        time.sleep(1)

    # The iteration is now trained. Make it the default project endpoint
    trainer.update_iteration(project.id, iteration.id, is_default=True)
    print("Done!")
    return project, iteration
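
# A hedged usage sketch, not part of the original example: run a prediction against
# the trained object detection project and print the returned bounding boxes. The
# prediction key, the test image path, and the use of prediction_endpoint (imported
# as in the earlier examples) are assumptions.
def predict_project(prediction_key, project, iteration):
    predictor = prediction_endpoint.PredictionEndpoint(prediction_key)
    test_image_path = os.path.join(IMAGES_FOLDER, "Test", "test_image.jpg")  # placeholder path
    with open(test_image_path, mode="rb") as test_data:
        results = predictor.predict_image(project.id, test_data.read(), iteration.id)

    # Each prediction carries a tag, a probability and a normalized bounding box.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name +
              ": {0:.2f}%".format(prediction.probability * 100),
              prediction.bounding_box.left, prediction.bounding_box.top,
              prediction.bounding_box.width, prediction.bounding_box.height)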