Example #1
import turicreate as tc

# Use more lambda workers to speed up SFrame processing
tc.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 20)
# Load the starter images; element_slice(5, -4) strips the 'imgs/' prefix
# and the 4-character file extension from each path to form the label
imgs = tc.load_images('imgs')
imgs['label'] = imgs['path'].element_slice(5, -4)
# Train a one-shot object detector and save it
model = tc.one_shot_object_detector.create(imgs, 'label')
model.save('model.model')
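# A possible follow-up, not in the original snippet: the trained one-shot
# detector can also be exported to Core ML like the detectors in the later
# examples. The output filename here is only a placeholder.
model.export_coreml('OneShotDetector.mlmodel')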

import turicreate as tc

# Import the data
# annotations = tc.SFrame.read_json('file_resized.json', orient='records')
data = tc.load_images('test.jpg')
# data = images.join(annotations)

# "expolore" the data
# this part work only on Mac.
# data['image_with_ground_truth'] = \
#    tc.object_detector.util.draw_bounding_boxes(data['image'], data['annotation'])
# data.explore()

model = tc.load_model('their_model.model')

# Test the model
predictions = model.predict(data)
data['predicted_image'] = \
  tc.object_detector.util.draw_bounding_boxes(data['image'], predictions)

data['predicted_image'][0].show()
data.explore()
Example #3
                bkg_w_obj.save(fp=output_fp, format="png")
                if args.annotate:
                    # Save annotation data
                    annotations.append({"path": output_fp, "annotations": ann})
                #print(n)
                n += 1

    if args.annotate:
        print("Saving out Annotations", flush=True)
        # Save annotations
        with open("annotations.json", "w") as f:
            f.write(json.dumps(annotations))

    if args.sframe:
        print("Saving out SFrame", flush=True)
        # Write out data to an sframe for turicreate training
        import turicreate as tc
        # Load images and annotations to sframes
        images = tc.load_images(output_images).sort("path")
        annots = tc.SArray(annotations).unpack(
            column_name_prefix=None).sort("path")
        # Join
        images = images.join(annots, how='left', on='path')
        # Save out sframe
        images[['image', 'path', 'annotations']].save("training_data.sframe")

    total_images = len(
        [f for f in os.listdir(output_images) if not f.startswith(".")])
    print("Done! Created {} synthetic training images.".format(total_images),
          flush=True)
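# Sketch of how the SFrame written above could be consumed for training.
# It assumes the 'training_data.sframe' file and column names produced by
# this script; the saved model name is just a placeholder.
import turicreate as tc

train_sf = tc.SFrame('training_data.sframe')
detector = tc.object_detector.create(train_sf, feature='image',
                                     annotations='annotations')
detector.save('synthetic_detector.model')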
Example #4
import turicreate as tc

# Load the style and content images
styles = tc.load_images('downloads/styles/')
content = tc.load_images('downloads/content/')

# Create a StyleTransfer model
model = tc.style_transfer.create(styles, content)

# Load some test images
test_images = tc.load_images('test/')

# Stylize the test images
stylized_images = model.stylize(test_images)

# Save the model for later use in Turi Create
model.save('mymodel.model')

# Export for use in Core ML
model.export_coreml('MyStyleTransfer.mlmodel')
# BEGIN NST_training_1
import turicreate as tc

# Configure as required
style_images_directory = 'style/'
content_images_directory = 'content/'
training_cycles_to_perform = 6000
output_model_filename = 'StyleTransferModel'
output_image_constraints = (800, 800)

# Load the style and content images
styles = tc.load_images(style_images_directory)
content = tc.load_images(content_images_directory)

# Create a StyleTransfer model
model = tc.style_transfer.create(styles, content,
    max_iterations=training_cycles_to_perform)

# Export for use in Core ML
model.export_coreml(output_model_filename + '.mlmodel',
    image_shape=output_image_constraints)
# END NST_training_1
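# Optional sanity check before shipping the exported model: stylize a few
# images with the trained model. The 'test/' directory and 'preview.png'
# filename are placeholders, not part of the original listing.
test_images = tc.load_images('test/')
stylized = model.stylize(test_images)
stylized['stylized_image'][0].save('preview.png')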
Example #6
# Import turicreate
import turicreate as tc

# Use all available GPUs
tc.config.set_num_gpus(-1)

# Load the style and content images
styles = tc.load_images('Dataset/style/')
content = tc.load_images('Dataset/content/')

# Create a StyleTransfer model
model = tc.style_transfer.create(styles, content)

# Load some test images
test_images = tc.load_images('Dataset/test/')

# Stylize the test images
stylized_images = model.stylize(test_images)

# Save the model for later use in Turi Create
model.save('image-styler.model')

# Export for use in Core ML
model.export_coreml('ImageStyler.mlmodel')
Example #7
import turicreate as tc

model = tc.load_model("mymodel_pencil.model")

# Load some test images
test_images = tc.load_images('../contents/')

# Stylize the test images
stylized_images = model.stylize(test_images)

print(type(stylized_images))
print(stylized_images)

stylized_images['stylized_image'][1].save("test1.png")
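# To write out every stylized image instead of a single row, the SFrame can
# be iterated directly; the filename pattern below is just an example.
for i, row in enumerate(stylized_images):
    row['stylized_image'].save('stylized_{}.png'.format(i))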
Example #8
import turicreate as tc
import mxnet as mx
import numpy as np
# Setup: use all available GPUs
tc.config.set_num_gpus(-1)
# Load the style and content images
styles = tc.load_images('/input/style_transfer_data/style/')
content = tc.load_images('/input/style_transfer_data/content/')
# Create a StyleTransfer model
model = tc.style_transfer.create(styles, content)
# Load some test images
test_images = tc.load_images('/input/style_transfer_data/test/')
# Stylize the test images
stylized_images = model.stylize(test_images)
# Save the model for later use in Turi Create
model.save('mymodel.model')

# Export for use in Core ML
model.export_coreml('MyStyleTransfer.mlmodel')
Example #9
import turicreate as tc

# define the training and test data
annotations = tc.SFrame('annotations.csv')
images = tc.load_images('training_images')
data = images.join(annotations)
train, test = data.random_split(0.8)

# train and evaluate the model
model = tc.object_detector.create(train)
metrics = model.evaluate(test)

# save the model and export to core ml (to be used in ios)
model.save('thashibarimodel.model')
model.export_coreml('thashibarimodel.mlmodel')
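# The evaluation result is a dictionary of metrics; mAP@50 can be printed
# the same way as in a later example in this collection.
print('mAP@50: {:.1%}'.format(metrics['mean_average_precision_50']))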
Example #10
import turicreate as tc

# disable gpu
# tc.config.set_num_gpus(0)
# pip uninstall -y mxnet && pip install mxnet-cu90==1.1.0    # for CUDA 9.0
# pip uninstall -y mxnet-cu90 && pip install mxnet-cu91==1.1.0    # for CUDA 9.1
# Load the style and content images
styles = tc.load_images('../styles/pencil-portrait-10.jpg')
content = tc.load_images('../contents/')

# Create a StyleTransfer model
model = tc.style_transfer.create(styles,
                                 content,
                                 batch_size=3,
                                 max_iterations=500)

# Save the model for later use in Turi Create
model.save('mymodel_pencil.model')
#
# # Export for use in Core ML
# model.export_coreml('MyStyleTransfer.mlmodel')

# Load some test images
test_images = tc.load_images('../contents/')

# Stylize the test images
stylized_images = model.stylize(test_images)
Example #11
import turicreate as tc
import json

# Name your model
modelName = "MyModel"

# Build the training SFrame from the JSON annotations
with open('annotations.json') as j:
    annotations = json.load(j)

annotationData = tc.SFrame(annotations)
data = tc.load_images('images/')
data = data.join(annotationData)
trainData, testData = data.random_split(0.8)

# Check the ground truth
trainData['image_with_ground_truth'] = \
    tc.object_detector.util.draw_bounding_boxes(trainData['image'], trainData['annotations'])
# trainData.explore()

# Train the model
model = tc.object_detector.create(trainData, feature="image", annotations="annotations", max_iterations=20)
model.save(modelName + '.model')

# Predictions
predictions = model.predict(testData, confidence_threshold=0.0, verbose=True)
# predictions.explore()
metrics = model.evaluate(testData)
print('mAP: {:.1%}'.format(metrics['mean_average_precision_50']))
# metrics

# Export the model
model.export_coreml(modelName + '.mlmodel')
Example #12
import turicreate as tc

# load model
model = tc.load_model('thashibarimodel.model')

# load test data
test = tc.load_images('test_images')

# evaluate model
predictions = model.predict(test)
test['predicted_image'] = tc.object_detector.util.draw_bounding_boxes(
    test['image'], predictions)
test[['image', 'predicted_image']].explore()
Example #13
import turicreate as tc
import os

current_dir = os.path.dirname(__file__)
model = tc.load_model(os.path.join(
    current_dir, '../model/v1.model'))

images = tc.load_images(os.path.join(current_dir, '../data/test'))
images['predictions'] = model.predict(images)
images.print_rows(num_rows=10)
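# The predictions could also be persisted for later inspection; the output
# path below is only a placeholder.
images.save(os.path.join(current_dir, '../data/test_predictions.sframe'))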
Example #14
import turicreate as tc

# Import the data
annotations = tc.SFrame.read_json('data.json', orient='records')
images = tc.load_images('images/')
data = images.join(annotations)

# Split the data for testing
train_data, test_data = data.random_split(0.8)

# "expolore" the data
# this part work only on Mac.
# data['image_with_ground_truth'] = \
#    tc.object_detector.util.draw_bounding_boxes(data['image'], data['annotation'])
# data.explore()

# Start training, this will take a while
print("Start Training")
model = tc.object_detector.create(train_data, max_iterations=1)

# Save the model for later use in Turi Create
model.save('mymodel.model')

# Export for use in Core ML
model.export_coreml('MyCustomObjectDetector.mlmodel')
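# The held-out test_data above is never used in this script; a minimal
# evaluation sketch, mirroring the other object detector examples:
metrics = model.evaluate(test_data)
print('mAP@50: {:.1%}'.format(metrics['mean_average_precision_50']))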
Example #15
import turicreate as tc

# Load the style and content images
styles = tc.load_images('style/')
content = tc.load_images('content/')

# Create a StyleTransfer model
model = tc.style_transfer.create(styles, content)

# Load some test images
test_images = tc.load_images('test/')

# Stylize the test images
stylized_images = model.stylize(test_images)

# Save the model for later use in Turi Create
model.save('model.model')

# Export for use in Core ML
model.export_coreml('model.mlmodel')

import turicreate as tc

images = tc.load_images('./images/')
images['labels'] = images['path'].element_slice(9, -4)

model = tc.one_shot_object_detector.create(images, 'labels')

predictions = model.predict(images)

# Export to Core ML
model.export_coreml('grn.mlmodel')
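# To eyeball the one-shot predictions, the bounding-box drawing helper used
# in the other examples should work here as well (the column name is just a
# convention picked for this sketch).
images['predicted_image'] = tc.object_detector.util.draw_bounding_boxes(
    images['image'], predictions)
images.explore()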
Example #17
import turicreate as tc
import os

train_model = tc.load_model('dog_classifier.model')
image_test = tc.load_images('newdog.jpg')
image_test['predictions'] = train_model.predict(image_test)
print(image_test['predictions'])
print(image_test)
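# Assuming dog_classifier.model is a standard image classifier, the top few
# class probabilities can also be inspected; k=3 is arbitrary here.
topk = train_model.predict_topk(image_test, k=3)
print(topk)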
import turicreate as tc

# Import the data
annotations = tc.SFrame.read_json('file_resized.json', orient='records')
images = tc.load_images('images_resized/')
data = images.join(annotations)

# Split the data for testing
train_data, test_data = data.random_split(0.8)

# "expolore" the data
# this part work only on Mac.
data['image_with_ground_truth'] = \
   tc.object_detector.util.draw_bounding_boxes(data['image'], data['annotation'])
data.explore()

# Start training, this will take a while
print("Start Training")
model = tc.object_detector.create(train_data, max_iterations=1)

# Test the model
predictions = model.predict(test_data)
test_data['predicted_image'] = tc.object_detector.util.draw_bounding_boxes(
    test_data['image'], predictions)
test_data['predicted_image'][0].show()
test_data.explore()
model.evaluate(test_data)

# Save the model for later use in Turi Create
model.save('mymodel.model')
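# For completeness (not part of the original snippet), the detector can be
# exported to Core ML like the other examples; the filename is a placeholder.
model.export_coreml('mymodel.mlmodel')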