def test_setup_invalid_category():
    rw = RunwayModel()

    @rw.setup(options={'category': category(choices=['Starks', 'Lannisters'])})
    def setup(opts):
        pass

    rw.run(debug=True)
    client = get_test_client(rw)

    response = client.post('/setup', json={'category': 'Tyrells'})
    assert response.status_code == 400
    json_response = json.loads(response.data)
    assert 'error' in json_response
    # ensure the user is displayed an error that indicates the category option
    # is problematic
    assert 'Invalid argument: category' in json_response['error']
    # ensure the user is displayed an error that indicates the problematic value
    assert 'Tyrells' in json_response['error']

def test_command_invalid_category():
    rw = RunwayModel()

    inputs = {'category': category(choices=['Starks', 'Lannisters'])}
    outputs = {'reflect': text}

    @rw.command('test_command', inputs=inputs, outputs=outputs)
    def test_command(opts):
        return opts['category']

    rw.run(debug=True)
    client = get_test_client(rw)

    response = client.post('/test_command', json={'category': 'Targaryen'})
    assert response.status_code == 400
    json_response = json.loads(response.data)
    assert 'error' in json_response
    # ensure the user is displayed an error that indicates the category option
    # is problematic
    assert 'Invalid argument: category' in json_response['error']
    # ensure the user is displayed an error that indicates the problematic value
    assert 'Targaryen' in json_response['error']

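# The failure mode these two tests assert can also be observed from a client
# once a model is running. Assuming the model is served locally on port 8000,
# as in the curl examples further down, posting a value outside the declared
# choices is rejected with HTTP 400 and an error naming the argument and the
# offending value:
#
# curl -H "content-type: application/json" -d '{"category": "Targaryen"}' http://localhost:8000/test_command
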
import runway
from runway.data_types import category, vector, image
from your_code import model

@runway.setup
def setup():
    return model()

sample_inputs = {
    "z": vector(length=512, description="The seed used to generate an output image."),
    "category": category(choices=["day", "night"])
}

sample_outputs = {
    "image": image(width=1024, height=1024)
}

# Descriptions are used to document each data type and `@runway.command()`; their values appear
# in the app as tooltips. Writing detailed descriptions for each model option goes a long way
# towards helping users learn how to interact with your model. Write descriptions as full
# sentences.
sample_description = "Generate a new image based on a z-vector and an output style: day or night."

@runway.command("sample",
                inputs=sample_inputs,
                outputs=sample_outputs,
                description=sample_description)
def sample(model, inputs):
    # The parameters passed to a function decorated by @runway.command() are:
    # 1. The return value of a function wrapped by @runway.setup(), usually a model.
    # 2. The inputs sent with the HTTP request to the /<command_name> endpoint,
    #    as defined by the inputs keyword argument delivered to @runway.command().

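# The excerpt above is cut off inside sample(). What follows is a minimal,
# self-contained sketch (not taken from that excerpt) of the same pattern end
# to end: the command body returns a dict keyed by the declared output names,
# and runway.run() starts the model server. The bare setup() and the
# solid-color placeholder image are assumptions for illustration only.
import runway
from runway.data_types import category, image
from PIL import Image

@runway.setup
def setup():
    return None  # placeholder for a real model object

@runway.command("sample",
                inputs={"category": category(choices=["day", "night"])},
                outputs={"image": image(width=64, height=64)})
def sample(model, inputs):
    # Return a dict whose keys match the names declared in outputs.
    color = (255, 220, 120) if inputs["category"] == "day" else (20, 20, 60)
    return {"image": Image.new("RGB", (64, 64), color)}

if __name__ == "__main__":
    runway.run()
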
import runway
from runway.data_types import category, vector, image
from your_image_generation_model import big_model, little_model

# The setup() function runs once when the model is initialized, and will run
# again for each well-formed HTTP POST request to http://localhost:9000/setup.
@runway.setup(options={'model_size': category(choices=['big', 'little'])})
def setup(opts):
    if opts['model_size'] == 'big':
        return big_model()
    else:
        return little_model()

inputs = {'noise_vector': vector(length=128, description='A random seed.')}
outputs = {'image': image(width=512, height=512)}

# The @runway.command() decorator is used to create interfaces to call functions
# remotely via an HTTP endpoint. This lets you send data to, or get data from,
# your model. Each command creates an HTTP route that the Runway app will use
# to communicate with your model (e.g. POST /generate). Multiple commands
# can be defined for the same model.
@runway.command('generate', inputs=inputs, outputs=outputs, description='Generate an image.')
def generate(model, input_args):
    # Functions wrapped by @runway.command() receive two arguments:
    # 1. Whatever is returned by a function wrapped by @runway.setup(),

# `pip install runway-python`.
import runway
from runway.data_types import category, image, number, boolean
import numpy as np
import tensorflow as tf
from PIL import Image
# from example_model import ExampleModel
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

setup_options = {
    "network": category(choices=["InceptionV1"], default="InceptionV1")
}

@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: network = {}'
    print(msg.format(opts['network']))
    model = models.InceptionV1()
    model.load_graphdef()
    return model

input_options = {
    'layer':

def test_model_setup_and_command():
    # use a dict to share state across function scopes. This makes up for the
    # fact that Python 2.x doesn't have support for the 'nonlocal' keyword.
    closure = dict(setup_ran=False, command_ran=False)

    expected_manifest = {
        'modelSDKVersion': model_sdk_version,
        'millisRunning': None,
        'millisSinceLastCommand': None,
        'GPU': os.environ.get('GPU', False),
        'options': [{
            'type': 'category',
            'name': 'size',
            'oneOf': ['big', 'small'],
            'default': 'big',
            'description': 'The size of the model. Bigger is better but also slower.',
        }],
        'commands': [{
            'name': 'test_command',
            'description': None,
            'inputs': [{
                'type': 'text',
                'name': 'input',
                'description': 'Some input text.',
                'default': '',
                'minLength': 0
            }],
            'outputs': [{
                'type': 'number',
                'name': 'output',
                'description': 'An output number.',
                'default': 0
            }]
        }]
    }

    rw = RunwayModel()
    description = 'The size of the model. Bigger is better but also slower.'

    @rw.setup(options={
        'size': category(choices=['big', 'small'], description=description)
    })
    def setup(opts):
        closure['setup_ran'] = True
        return {}

    inputs = {'input': text(description='Some input text.')}
    outputs = {'output': number(description='An output number.')}

    # Python 2.7 doesn't seem to handle emoji serialization correctly in JSON,
    # so we will only test emoji serialization/deserialization in Python 3
    if sys.version_info[0] < 3:
        description = 'Sorry, Python 2 doesn\'t support emoji very well'
    else:
        description = 'A test command whose description contains emoji 🕳'
    expected_manifest['commands'][0]['description'] = description

    @rw.command('test_command', inputs=inputs, outputs=outputs, description=description)
    def test_command(model, opts):
        closure['command_ran'] = True
        return 100

    rw.run(debug=True)
    client = get_test_client(rw)

    response = client.get('/meta')
    assert response.is_json

    manifest = json.loads(response.data)
    # unset millisRunning as we can't reliably predict this value.
    # testing that it is an int should be good enough.
    assert type(manifest['millisRunning']) == int
    manifest['millisRunning'] = None
    assert manifest == expected_manifest

    # TEMPORARILY CHECK / PATH IN ADDITION TO /meta ----------------------------
    # ... sorry for the gross dupe code ;)
    response = client.get('/')
    assert response.is_json

    manifest = json.loads(response.data)
    # unset millisRunning as we can't reliably predict this value.
    # testing that it is an int should be good enough.
    assert type(manifest['millisRunning']) == int
    manifest['millisRunning'] = None
    assert manifest == expected_manifest
    # --------------------------------------------------------------------------

    # check the input/output manifest for GET /test_command
    response = client.get('/test_command')
    assert response.is_json
    command_manifest = json.loads(response.data)
    assert command_manifest == expected_manifest['commands'][0]

    post_data = {'input': 'test input'}
    response = client.post('/test_command', json=post_data)
    assert response.is_json
    assert json.loads(response.data) == {'output': 100}

    # now that we've run a command lets make sure millis since last command is
    # a number
    manifest_after_command = get_manifest(client)
    assert type(manifest_after_command['millisSinceLastCommand']) == int

    assert closure['command_ran'] == True
    assert closure['setup_ran'] == True

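# For reference: the manifest asserted above can also be inspected against a
# running model, e.g. with `curl http://localhost:8000/meta` (port assumed, as
# in the other snippets here). It lists the setup options and commands,
# including each category option's `oneOf` choices and default value.
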
# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, category
from model import YOLACT_MODEL
from PIL import Image

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'threshold': number(min=0, max=1, step=0.1, default=0.3),
    'checkpoint': runway.file(extension='.pth'),
    'mode': category(choices=["mask_only", "box_only", "both"], default="both"),
}

@runway.setup(options=setup_options)
def setup(opts):
    model = YOLACT_MODEL(opts)
    return model

# Every model needs to have at least one command. Every command lets you send
# inputs and process outputs. For a complete list of supported input and
# output data types, see: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='detect',
                inputs={'input_image': image(width=550, height=550)},
                outputs={'output_image': image(width=550, height=550)})
def detect(model, args):
    # Generate a PIL or Numpy image based on the input caption, and return it

import runway
from runway.data_types import category

# if no default value is specified, the first element in the choices
# list will be used
cat = category(choices=["rgb", "bgr", "rgba", "bgra"], default="rgba")

@runway.setup(options={"pixel_order": cat})
def setup(opts):
    print("The selected pixel order is {}".format(opts["pixel_order"]))

runway.run()

# curl -H "content-type: application/json" -d '{"pixel_order": "bgr"}' http://localhost:8000/setup

import runway
from runway.data_types import category
from your_code import model

# Descriptions are used to document each data type; their values appear in the app as
# tooltips. Writing detailed descriptions for each model option goes a long way towards
# helping users learn how to interact with your model. Write descriptions as full sentences.
network_size_description = "The size of the network. A larger number will result in " \
                           "better accuracy at the expense of increased latency."

options = {
    "network_size": category(choices=[64, 128, 256, 512],
                             default=256,
                             description=network_size_description)
}

@runway.setup(options=options)
def setup(opts):
    print("Setup ran, and the network size is {}".format(opts["network_size"]))
    return model(network_size=opts["network_size"])

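# Following the curl convention used elsewhere in this collection, the option
# above could be exercised with a request like the one below once the model is
# running (the port and the /setup endpoint are assumptions here, since this
# excerpt does not include the runway.run() call):
#
# curl -H "content-type: application/json" -d '{"network_size": 128}' http://localhost:8000/setup
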
from predictor import COCODemo
import numpy as np
import torch
import runway
from runway.data_types import image, category, number
from constants import CATEGORIES
# Assumed import, not shown in the original excerpt: COCODemo's `cfg` comes
# from maskrcnn-benchmark's global config object.
from maskrcnn_benchmark.config import cfg

architectures = [
    'R-50-C4',
    'R-50-FPN',
    'R-101-FPN',
    'X-101-32x8d-FPN'
]

@runway.setup(options={
    'architecture': category(choices=architectures, default='R-50-FPN'),
    'confidenceThreshold': number(min=0, max=1, step=0.1, default=0.7)
})
def setup(opts):
    config_file = "configs/caffe2/e2e_mask_rcnn_%s_1x_caffe2.yaml" % opts['architecture'].replace('-', '_')
    cfg.merge_from_file(config_file)
    if not torch.cuda.is_available():
        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    model = COCODemo(
        cfg,
        confidence_threshold=opts['confidenceThreshold'],
    )
    return model

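# Worked example of the substitution above: the default 'R-50-FPN' architecture
# resolves to the config path "configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml".
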
import runway
from runway.data_types import file, category

inputs = {"directory": file(is_directory=True)}
outputs = {"result": category(choices=["success", "failure"])}

@runway.command("batch_process", inputs=inputs, outputs=outputs)
def batch_process(result_of_setup, args):
    result = True  # do_something_with(args["directory"])
    return {"result": "success" if result else "failure"}

runway.run()

# curl -H "content-type: application/json" -d '{"directory": "test"}' http://localhost:8000/batch_process

input_list = {
    'content_image': image,
    'style_image_1': image,
    'original_colors': boolean(default=False),
    'style_only': boolean(default=False),
    'max_iterations': number(min=50, max=1500, step=50, default=500,
                             description='Iterations'),
    'content_layer': category(
        choices=[
            'conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2',
            'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2'
        ],
        default='conv4_2',
        description='what VGG19 layer to use for content'),
    'style_scale': number(min=0.1, max=2.0, step=.05, default=1.0,
                          description='Scale of style images.'),
}

@runway.command(
    name='generate',
    inputs=input_list,
    outputs={'image': image()},

    inputs={
        'image': image(),
        'resize': number(default=0.5, min=0, max=1, step=0.1),
        'fps': number(default=24, min=1, max=120, step=1),
        'length_sec': number(default=2, min=1, max=10, step=0.2,
                             description='Output video length in seconds.'),
        'effect_type': category(choices=effect_types, default=effect_types[1],
                                description='Video effect.'),
        'effect_size': number(default=0.5, min=0, max=1, step=0.1),
        'reuse': boolean(default=False,
                         description='Reuse depth map and continue from previous iteration.'),
    },
    outputs={'image': image()},
    description='Cartoonize.')
def paint(model, args):
    x_shift = 0
    y_shift = 0
    z_shift = 0

"in properly.\n" \ "Video is optimized for smooth, consistent and flicker-free video." render_factor_description = "The default value of 35 has been carefully chosen and should work -ok- for most " \ "scenarios (but probably won't be the -best-). This determines resolution at which " \ "the color portion of the image is rendered. Lower resolution will render faster, and " \ "colors also tend to look more vibrant. Older and lower quality images in particular" \ " will generally benefit by lowering the render factor. Higher render factors are often " \ "better for higher quality images, but the colors may get slightly washed out." @runway.setup( options={ "architecture": category(description=architecture_description, choices=['Artistic', 'Stable', 'Video'], default='Artistic') }) def setup(opts): architecture = opts['architecture'] print('[SETUP] Ran with architecture "{}"'.format(architecture)) if architecture == 'Artistic': colorizer = get_image_colorizer(artistic=True) elif architecture == 'Stable': colorizer = get_image_colorizer(artistic=False) else: colorizer = get_video_colorizer().vis return colorizer
import runway
from runway.data_types import category
from your_code import model

options = {"network_size": category(choices=[64, 128, 256, 512], default=256)}

@runway.setup(options=options)
def setup(opts):
    print("Setup ran, and the network size is {}".format(opts["network_size"]))
    return model(network_size=opts["network_size"])

# used.
setup_options = {
    'truncation': number(min=1, max=10, step=1, default=5, description='Example input.'),
    'seed': number(min=0, max=1000000, description='A seed used to initialize the model.')
}

@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model

inputs = {
    # 'file': file(extension=".zip"),
    'image': image(),
    'model': category(choices=["none", "random", "color", "bit/m-r101x1", "vgg16"],
                      default="color", description='Cluster model.'),
    'slices': number(min=5, max=30, step=5, default=10, description='Number of slices.'),
    'vgg_depth': number(min=1, max=8, step=1, default=7, description='VGG Feature Depth'),
}

# Every model needs to have at least one command. Every command lets you send
# inputs and process outputs. For a complete list of supported input and
# output data types, see: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate',
                inputs=inputs,
                outputs={
                    'image': image(width=512, height=512),
                    'info': text("hello")
                },
                description='Generates a red square when the input text input is "red".')
def generate(model, args):
    print('[GENERATE] Ran with image "{}"'.format(args['image']))
    # Generate a PIL or Numpy image based on the input caption, and return it
    output_image = model.run_on_input(args['image'], args['slices'], args['model'],
                                      args['vgg_depth'])

import torch
import process_stylization
from photo_wct import PhotoWCT
from PIL import Image
import runway
from runway.data_types import image, category

PRETRAINED_MODEL_PATH = './PhotoWCTModels/photo_wct.pth'

@runway.setup(options={
    'propagation_mode': category(
        description='Speeds up the propagation step by using the guided image filtering algorithm',
        choices=['fast', 'slow'],
        default='fast')
})
def setup(opts):
    p_wct = PhotoWCT()
    p_wct.load_state_dict(torch.load(PRETRAINED_MODEL_PATH))
    if opts['propagation_mode'] == 'fast':
        from photo_gif import GIFSmoothing
        p_pro = GIFSmoothing(r=35, eps=0.001)
    else:
        from photo_smooth import Propagator
        p_pro = Propagator()
    if torch.cuda.is_available():
        p_wct.cuda(0)
    return {