Example #1

def get_colorizer(architecture):  # function name assumed; the snippet begins mid-definition
    if architecture == 'Artistic':
        colorizer = get_image_colorizer(artistic=True)
    elif architecture == 'Stable':
        colorizer = get_image_colorizer(artistic=False)
    else:
        colorizer = get_video_colorizer().vis
    return colorizer


@runway.command(name='generate',
                inputs={
                    'image':
                    image(description='Image to colorize'),
                    'render_factor':
                    number(description=render_factor_description,
                           min=7,
                           max=45,
                           step=1,
                           default=35)
                },
                outputs={'image': image(description='Colorized image')})
def generate(model, args):
    render_factor = args['render_factor']
    print('[GENERATE] Ran with render_factor "{}"'.format(render_factor))

    orig_image = args['image'].convert('RGB')
    model._clean_mem()
    output_image = model.filter.filter(orig_image,
                                       orig_image,
                                       render_factor=render_factor)

    return {'image': output_image}
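# As in the curl example shown under Example #23, this command can be
# exercised over HTTP once the model is served (sketch; host, port, and the
# base64 image payload below are placeholders):
#
# curl -H "content-type: application/json" \
#   -d '{"image": "data:image/jpeg;base64,...", "render_factor": 35}' \
#   http://localhost:8000/generate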
Example #2

def test_meta(capsys):

    rw = RunwayModel()

    @rw.setup(options={'initialization_array': array(item_type=text)})
    def setup(opts):
        pass

    kwargs_1 = {
        'inputs': {
            'image': image,
            'vector': vector(length=5)
        },
        'outputs': {
            'label': text
        }
    }

    @rw.command('command_1', **kwargs_1)
    def command_1(opts):
        pass

    kwargs_2 = {
        'description': 'This command is used for testing.',
        'inputs': {
            'any': any_type,
            'file': file
        },
        'outputs': {
            'number': number(min=10, max=100)
        }
    }

    @rw.command('command_2', **kwargs_2)
    def command_2(opts):
        pass

    expected_manifest = {
        'options': [{
            'minLength': 0,
            'type': 'array',
            'name': 'initialization_array',
            'description': None,
            'itemType': {
                'default': '',
                'minLength': 0,
                'type': 'text',
                'name': 'text_array_item',
                'description': None
            }
        }],
        'commands': [{
            'name':
            'command_2',
            'description':
            'This command is used for testing.',
            'inputs': [
                {
                    'type': 'any',
                    'name': 'any',
                    'description': None,
                },
                {
                    'type': 'file',
                    'name': 'file',
                    'description': None,
                },
            ],
            'outputs': [
                {
                    'name': 'number',
                    'min': 10,
                    'default': 10,
                    'max': 100,
                    'type': 'number',
                    'description': None
                },
            ]
        }, {
            'name':
            'command_1',
            'description':
            None,
            'inputs': [
                {
                    'channels': 3,
                    'type': 'image',
                    'name': 'image',
                    'description': None,
                    'defaultOutputFormat': 'JPEG'
                },
                {
                    'samplingMean': 0,
                    'length': 5,
                    'type': 'vector',
                    'name': 'vector',
                    'samplingStd': 1,
                    'default': None,
                    'description': None
                },
            ],
            'outputs': [{
                'default': '',
                'minLength': 0,
                'type': 'text',
                'name': 'label',
                'description': None
            }]
        }]
    }

    # Setting RW_META makes run() print the model manifest to stdout and exit
    # instead of starting the server, which is what this test relies on.
    os.environ['RW_META'] = '1'

    rw.run(debug=True,
           model_options={'initialization_array': ['one', 'two', 'three']})
    std = capsys.readouterr()
    manifest = json.loads(std.out.strip('\n'))

    # DeepDiff is required here because Python2 handles stdin encoding strangely
    # and because dict order is not guaranteed in Python2. I ran up a tree
    # trying to get this comparison working without relying on a lib, but
    # ultimately it was just wasting my time.
    diff = DeepDiff(manifest, expected_manifest, ignore_order=True)
    assert len(diff.keys()) == 0
    assert std.err == ''

    os.environ['RW_META'] = '0'
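# capsys is a pytest fixture, so this test is meant to be run under pytest,
# e.g. (sketch):
#
#   pytest -k test_meta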
Example #3

# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image
from example_model import ExampleModel

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'truncation':
    number(min=1, max=10, step=1, default=5, description='Example input.'),
    'seed':
    number(min=0,
           max=1000000,
           description='A seed used to initialize the model.')
}


@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model
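# A setup function like this is normally paired with at least one command
# that consumes the returned model. A minimal sketch, assuming a hypothetical
# ExampleModel.run_on_input method (not part of this snippet):
@runway.command(name='generate',
                inputs={'caption': text()},
                outputs={'image': image(width=512, height=512)})
def generate(model, args):
    # model is the ExampleModel instance returned by setup()
    return {'image': model.run_on_input(args['caption'])}

if __name__ == '__main__':
    runway.run()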

Example #4

# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, category
from example_model import ExampleModel

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'truncation': number(min=1, max=10, step=1, default=5, description='Example input.'),
    'seed': number(min=0, max=1000000, description='A seed used to initialize the model.')
}
@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model

inputs = {
    # 'file': file(extension=".zip"),
    'image': image(),
    'model': category(choices=["none", "random", "color", "bit/m-r101x1", "vgg16"], default="color", description='Cluster model.'),
    'slices': number(min=5, max=30, step=5, default=10, description='Number of slices.'),
    'vgg_depth': number(min=1, max=8, step=1, default=7, description='VGG Feature Depth'),
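    # ... the remaining entries are truncated in the original snippet; the
    # closing brace below is assumed so the dict parses
}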
Example #5

input_options = {
    'layer':
    category(choices=[
        "conv2d0 (max:64)", "maxpool0 (max:64)", "conv2d1 (max:64)",
        "conv2d2 (max:192)", "maxpool1 (max:192)", "mixed3a (max:256)",
        "mixed3b (max:480)", "maxpool4 (max:480)", "mixed4a (max:508)",
        "mixed4b (max:512)", "mixed4c (max:512)", "mixed4d (max:528)",
        "mixed4e (max:832)", "maxpool10 (max:832)", "mixed5a (max:832)",
        "mixed5b (max:1024)"
    ],
             default="mixed5b (max:1024)",
             description='choose layer of network to visualize'),
    'neuron':
    number(default=0, min=0, max=1023, step=1, description='Neuron ID'),
    'size':
    number(default=128, min=128, max=1024, step=128, description='Image Size'),
    'transforms':
    boolean(default=False, description='Vary size of visualization'),
    'transform_min':
    number(default=0.3,
           min=0.0,
           max=1.0,
           step=.1,
           description='Minimum scaling amount'),
    'transform_max':
    number(default=0.5,
           min=0.0,
           max=1.0,
           step=.1,
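           # the original snippet is cut off here; the closing argument and
           # braces below are assumed, with the description inferred by
           # analogy with 'transform_min'
           description='Maximum scaling amount'),
}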
Example #6

# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image
from example_model import ExampleModel
# from test_video import ...  # incomplete import in the original snippet; commented out so the module parses

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'truncation': number(min=1, max=10, step=1, default=5, description='Example input.'),
    'seed': number(min=0, max=1000000, description='A seed used to initialize the model.')
}
@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model

# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='generate',
                inputs={ 'caption': text() },
                outputs={ 'image': image(width=512, height=512) },
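                # the original snippet is cut off mid-decorator; the closing
                # argument and the body below are an assumed sketch, and
                # run_on_input is a hypothetical method
                description='Generate an image from a caption.')
def generate(model, args):
    return {'image': model.run_on_input(args['caption'])}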
Example #7

import logging
import argparse
import time

import numpy as np

import runway
from runway.data_types import number, text, image, file


def sample(model, img):
    # Ensure the image is no larger than 1024px on its longest side
    if img.size[0] > 1024 or img.size[1] > 1024:
        img.thumbnail((1024, 1024))
    return model.run(np.array(img)).convert("L")


@runway.setup(options={"onnx": number(default=0)})
def setup(opts):
    import basnet
    return basnet


@runway.command(name='paste',
                inputs={'image': image},
                outputs={'image': image(channels=4)})
def paste(model, inputs):
    start = time.time()
    logging.info('generating mask...')
    img = inputs['image']
    if img.size[0] > 1024 or img.size[1] > 1024:
        img.thumbnail((1024, 1024))
    mask = sample(model, img)
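    # The original snippet is truncated here. A plausible continuation
    # (assumed, not from the source): apply the mask as an alpha channel and
    # return the RGBA cutout.
    img = img.convert('RGBA')
    img.putalpha(mask)
    logging.info('paste took %.2fs', time.time() - start)
    return {'image': img}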
Example #8

# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image
from example_model import ExampleModel
from PIL import Image

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'truncation': number(min=5, max=100, step=1, default=10),
    'seed': number(min=0, max=1000000)
}


@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: seed = {}, truncation = {}'
    print(msg.format(opts['seed'], opts['truncation']))
    model = ExampleModel(opts)
    return model


# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
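# The command that this comment introduces was truncated from the snippet.
# A minimal sketch, again assuming a hypothetical ExampleModel.run_on_input
# method:
@runway.command(name='generate',
                inputs={'caption': text()},
                outputs={'image': image()})
def generate(model, args):
    return {'image': model.run_on_input(args['caption'])}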
Example #9

def setup():
    model = NeuralStyle()
    return model


input_list = {
    'content_image':
    image,
    'style_image_1':
    image,
    'original_colors':
    boolean(default=False),
    'style_only':
    boolean(default=False),
    'max_iterations':
    number(min=50, max=1500, step=50, default=500, description='Iterations'),
    'content_layer':
    category(choices=[
        'conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2',
        'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2'
    ],
             default='conv4_2',
             description='what VGG19 layer to use for content'),
    'style_scale':
    number(min=0.1,
           max=2.0,
           step=.05,
           default=1.0,
           description='Scale of style images.'),
}
    "inception4c": model.inception4c,
    "inception4d": model.inception4d,
    "inception4e": model.inception4e,
    "inception5a": model.inception5a,
    "inception5b": model.inception5b,
}

config = {
    "image_path": None,
    "layers": [model.inception4b],
    "custom_func": [None]
}

input_dict = {
    "image": image(),
    "octave_scale": number(step=0.05, min=1.0, max=1.7, default=1.2),
    "num_octaves": number(step=1, min=1, max=25, default=5),
    "iterations": number(step=1, min=1, max=100, default=14),
    "lr": number(step=1e-4, min=1e-9, max=1e-1, default=0.05),
    "max_rotation": number(step=0.1, min=0.0, max=1.5, default=0.9),
    "layer_index": number(step=1, min=0, max=len(layers), default=0),
    "channel_index": number(step=1, min=-1, max=511, default=0),
    "invert_mask": boolean(default=False)
}


@runway.setup
def setup():
    dreamy_boi = dreamer(model)
    return dreamy_boi
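# The command that consumes input_dict was truncated from the original
# snippet. A minimal sketch (the 'dream' name and the model.run call are
# assumptions, not the source's API):
@runway.command('dream', inputs=input_dict, outputs={'image': image()})
def dream(model, args):
    return {'image': model.run(args['image'])}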
Example #11

}  # closing brace of a definition truncated above


@runway.setup(options=setup_options)
def setup(opts):
    model = PhotoInpaintModel()
    return model


@runway.command(
    name='paint',
    inputs={
        'image':
        image(),
        'resize':
        number(default=0.5, min=0, max=1, step=0.1),
        'fps':
        number(default=24, min=1, max=120, step=1),
        'length_sec':
        number(default=2,
               min=1,
               max=10,
               step=0.2,
               description='Output video length in seconds.'),
        'effect_type':
        category(choices=effect_types,
                 default=effect_types[1],
                 description='Video effect.'),
        'effect_size':
        number(default=0.5, min=0, max=1, step=0.1),
        'reuse':
Example #12

    "inception4c": model.inception4c,
    "inception4d": model.inception4d,
    "inception4e": model.inception4e,
    "inception5a": model.inception5a,
    "inception5b": model.inception5b,
}

config = {
    "image_path": None,
    "layers": [model.inception4b],
    "custom_func": [None]
}

input_dict = {
    "image": image(),
    "octave_scale": number(step=0.05, min=1.0, max=1.7, default=1.2),
    "num_octaves": number(step=1, min=1, max=25, default=5),
    "iterations": number(step=1, min=1, max=100, default=14),
    "lr": number(step=1e-4, min=1e-9, max=1e-1, default=0.05),
    "max_rotation": number(step=0.1, min=0.0, max=1.5, default=0.9)
}

for key in list(layers.keys()):
    input_dict[key] = boolean(default=False)


@runway.setup
def setup():
    dreamy_boi = dreamer(model)
    return dreamy_boi
Example #13

from cartoonize_model import CartoonizeModel

setup_options = {
    'model_path': text(description='Model path. Empty string = default model.'),
}
@runway.setup(options=setup_options)
def setup(opts):
    model = CartoonizeModel({
        'model_path': opts['model_path'] or 'WBCartoonization/test_code/saved_models'
    })
    return model

@runway.command(name='cartoonize',
        inputs={
            'image': image(),
            'resize': number(default=50, min=0, max=100, step=1)
        },
        outputs={
            'image': image()
        },
        description='Cartoonize.')
def cartoonize(model, args):
    output_image = model.cartoonize(args['image'], {
        'resize': args['resize'] / 100
    })
    return {
        'image': output_image
    }

if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000)
Example #14

def test_model_setup_and_command():

    # use a dict to share state across function scopes. This makes up for the
    # fact that Python 2.x doesn't have support for the 'nonlocal' keyword.
    closure = dict(setup_ran=False, command_ran=False)

    expected_manifest = {
        'modelSDKVersion':
        model_sdk_version,
        'millisRunning':
        None,
        'millisSinceLastCommand':
        None,
        'GPU':
        os.environ.get('GPU', False),
        'options': [{
            'type':
            'category',
            'name':
            'size',
            'oneOf': ['big', 'small'],
            'default':
            'big',
            'description':
            'The size of the model. Bigger is better but also slower.',
        }],
        'commands': [{
            'name':
            'test_command',
            'description':
            None,
            'inputs': [{
                'type': 'text',
                'name': 'input',
                'description': 'Some input text.',
                'default': '',
                'minLength': 0
            }],
            'outputs': [{
                'type': 'number',
                'name': 'output',
                'description': 'An output number.',
                'default': 0
            }]
        }]
    }

    rw = RunwayModel()

    description = 'The size of the model. Bigger is better but also slower.'

    @rw.setup(options={
        'size':
        category(choices=['big', 'small'], description=description)
    })
    def setup(opts):
        closure['setup_ran'] = True
        return {}

    inputs = {'input': text(description='Some input text.')}
    outputs = {'output': number(description='An output number.')}

    # Python 2.7 doesn't seem to handle emoji serialization correctly in JSON,
    # so we will only test emoji serialization/deserialization in Python 3
    if sys.version_info[0] < 3:
        description = 'Sorry, Python 2 doesn\'t support emoji very well'
    else:
        description = 'A test command whose description contains emoji 🕳'
    expected_manifest['commands'][0]['description'] = description

    @rw.command('test_command',
                inputs=inputs,
                outputs=outputs,
                description=description)
    def test_command(model, opts):
        closure['command_ran'] = True
        return 100

    rw.run(debug=True)

    client = get_test_client(rw)

    response = client.get('/meta')
    assert response.is_json

    manifest = json.loads(response.data)

    # unset millisRunning as we can't reliably predict this value.
    # testing that it is an int should be good enough.
    assert type(manifest['millisRunning']) == int
    manifest['millisRunning'] = None

    assert manifest == expected_manifest

    # TEMPORARILY CHECK / PATH IN ADDITION TO /meta ----------------------------
    # ... sorry for the gross dupe code ;)
    response = client.get('/')
    assert response.is_json

    manifest = json.loads(response.data)

    # unset millisRunning as we can't reliably predict this value.
    # testing that it is an int should be good enough.
    assert type(manifest['millisRunning']) == int
    manifest['millisRunning'] = None

    assert manifest == expected_manifest
    # --------------------------------------------------------------------------

    # check the input/output manifest for GET /test_command
    response = client.get('/test_command')
    assert response.is_json

    command_manifest = json.loads(response.data)
    assert command_manifest == expected_manifest['commands'][0]

    post_data = {'input': 'test input'}
    response = client.post('/test_command', json=post_data)
    assert response.is_json
    assert json.loads(response.data) == {'output': 100}

    # now that we've run a command lets make sure millis since last command is
    # a number
    manifest_after_command = get_manifest(client)
    assert type(manifest_after_command['millisSinceLastCommand']) == int

    assert closure['command_ran'] == True
    assert closure['setup_ran'] == True
Example #15

import os
import random

import runway
from runway.data_types import number, image
from generate_for_runway import *

inputs = {'z': number(default=random.randint(1, 1000), min=1, max=1000)}

setup_options = {'truncation': number(min=5, max=100, step=1, default=5)}


@runway.setup(options=setup_options)
def setup(opts):
    return opts


@runway.command(name='generate', inputs=inputs, outputs={'image': image})
def generate_(model, args):
    # _run_cmd('export CUDA_HOME=/usr/local/cuda')
    # _run_cmd('export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64')
    # _run_cmd('export PATH=$PATH:$CUDA_HOME/bin')
    _run_cmd('nvidia-smi')
    network_pkl = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/ffhq.pkl'
    seed = args['z']
    trunc = 0.5
    output_image = generate_images(network_pkl, seed, trunc)
    return {'image': output_image}


def _run_cmd(cmd):
    with os.popen(cmd) as pipe:
        output = pipe.read()
        status = pipe.close()
Example #16

import runway
from runway.data_types import text, number

import gzip

from markovify import Text

@runway.setup
def setup():
    msg = '[SETUP]'
    with gzip.open('./novel-model-markovify.json.gz') as fh:
        return Text.from_json(fh.read())

@runway.command(
    name='generate',
    inputs={'max_len': number(default=80, min=10, max=1000),
        'seed': number(default=0, min=0, max=1e6)},
    outputs={'output': text()})
def generate(model, args):
    print('[GENERATE] Ran with max_len value "{}"'.format(args['max_len']))
    output = model.make_short_sentence(args['max_len'])
    return {'output': output}

if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=8000)
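# As with the curl example under Example #23, the command can be exercised
# once the server is up (sketch; the input values are arbitrary):
#
# curl -H "content-type: application/json" -d '{"max_len": 120, "seed": 42}' http://localhost:8000/generate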

Example #17

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, vector
from example_model import CPPNModel

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.


setup_options = {
    'mode': text(default='tanh'),
    'seed': number(min=0, max=1000000, default=5, description='A seed used to initialize the model.'),
    'resolution': number(min=32, max=1024, default=64, description='output image size')
}

res = 64

@runway.setup(options=setup_options)
def setup(opts):
    msg = '[SETUP] Ran with options: mode = {}, seed = {}, res = {}'
    print(msg.format(opts['mode'], opts['seed'], opts['resolution']))
    global res  # make the assignment update the module-level res
    res = opts['resolution']
    model = CPPNModel(opts)
    return model


# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
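# A minimal sketch of such a command for this model (the generate_image
# method and the vector input are assumptions; the original command was
# truncated from the snippet):
@runway.command(name='generate',
                inputs={'z': vector(length=128)},
                outputs={'image': image()})
def generate(model, args):
    return {'image': model.generate_image(args['z'])}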
Example #18

import torch
import runway
from runway.data_types import number, text
from distilgpt2_model import DistilGPT2Model


@runway.setup(options={'max_len': number(default=1000, min=100, max=5000)})
def setup(opts):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device={device}, max_len={opts['max_len']}")
    model = DistilGPT2Model(device=device, max_len=opts['max_len'])
    return model


inputs = {'prompt': text}
outputs = {'generated': text}


@runway.command('generate',
                inputs=inputs,
                outputs=outputs,
                description='Generate text from a prompt.')
def generate(model, input_args):
    return model.generate(input_args['prompt'])


if __name__ == '__main__':
    runway.run(host='0.0.0.0', port=9000)
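# A matching request against this server would look like the curl example
# under Example #23 (sketch; note the port is 9000 here):
#
# curl -H "content-type: application/json" -d '{"prompt": "Once upon a time"}' http://localhost:9000/generate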

Example #19

@runway.setup
def setup():
    tflib.init_tf()
    with dnnlib.util.open_url(URL_FFHQ) as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)
    generator = Generator(Gs_network, 1, randomize_noise=False)
    perceptual_model = PerceptualModel(256, layer=9, batch_size=1)
    perceptual_model.build_perceptual_model(generator.generated_image)
    return perceptual_model, generator


INPUTS = {
    'reference': image,
    'iterations': number(default=500, min=1, max=10000),
    'learning_rate': number(default=1, step=0.01, min=0, max=3)
}


@runway.command('autoencode', inputs=INPUTS, outputs={'output': vector(512)})
def autoencode(model, inputs):
    perceptual_model, generator = model
    perceptual_model.set_reference_images([inputs['reference']])
    op = perceptual_model.optimize(generator.dlatent_variable,
                                   iterations=inputs['iterations'],
                                   learning_rate=inputs['learning_rate'])
    pbar = tqdm(op, leave=False, total=inputs['iterations'])
    print('Processing image...')
    for loss in pbar:
        pbar.set_description('Loss: %.2f' % loss)
Example #20

# Copyright (c) 2021 Justin Pinkney

import runway
from runway.data_types import category, vector, image, number

import editor
import face_detection

edit_controls = {k: number(description=k, default=0, min=-20, max=20) for k in editor.edits.keys()}
inputs = {'original': image()}
inputs.update(edit_controls)
outputs = { 'image': image() }


@runway.setup(options={
        'checkpoint': runway.file(extension='.pt', default="psp_ffhq_encode.pt"),
        'face_detector': runway.file(extension='.dat', default="shape_predictor_5_face_landmarks.dat"),
    })
def setup(opts):
    checkpoint_path = opts['checkpoint']
    face_detection.MODEL_PATH = opts['face_detector']

    encoder, decoder, latent_avg = editor.load_model(checkpoint_path)

    manipulator = editor.manipulate_model(decoder)
    manipulator.edits = {editor.idx_dict[v[0]]: {v[1]: 0} for k, v in editor.edits.items()}

    return encoder, decoder, latent_avg, manipulator


@runway.command('encode', inputs=inputs, outputs=outputs, description='Generate an image.')
Example #21

import numpy as np
import torch
import runway
from runway.data_types import image, category, number
from constants import CATEGORIES

architectures = [
    'R-50-C4', 'R-50-FPN', 'R-101-FPN', 'X-101-32x8d-FPN', 'R-50-C4',
    'R-50-FPN', 'R-101-FPN', 'X-101-32x8d-FPN'
]


@runway.setup(
    options={
        'architecture': category(choices=architectures, default='R-50-FPN'),
        'confidenceThreshold': number(min=0, max=1, step=0.1, default=0.7)
    })
def setup(opts):
    config_file = "configs/caffe2/e2e_mask_rcnn_%s_1x_caffe2.yaml" % opts[
        'architecture'].replace('-', '_')
    cfg.merge_from_file(config_file)
    if not torch.cuda.is_available():
        cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    model = COCODemo(
        cfg,
        confidence_threshold=opts['confidenceThreshold'],
    )
    return model


@runway.command('mask',
Example #22

def setup(opts):  # header reconstructed; the snippet begins mid-function (an @runway.setup body)
    content_tf = test_transform(0, False)
    style_tf = test_transform(0, False)
    return {
      'vgg': vgg,
      'decoder': decoder,
      'content_tf': content_tf,
      'style_tf': style_tf,
    }


@runway.command(name='generate',
                inputs={
                  'content_image': image(description='Content Image'),
                  'style_image': image(description='Style Image'),
                  'preserve_color': boolean(description='Preserve content image color'),
                  'alpha': number(description='Controls the degree of stylization',
                                                 min=0, max=1, step=0.01, default=1)
                },
                outputs={ 'image': image(description='Output image') })
def generate(model, args):
    content_image = args['content_image'].convert('RGB')
    style_image = args['style_image'].convert('RGB')
    preserve_color = args['preserve_color']
    alpha = args['alpha']
    print('[GENERATE] Ran with preserve_color "{}". alpha "{}"'.format(preserve_color, alpha))

    vgg = model['vgg']
    decoder = model['decoder']
    content_tf = model['content_tf']
    style_tf = model['style_tf']

    content = content_tf(content_image)
Example #23

import runway
from runway.data_types import vector, number
import numpy as np

inputs = {"length": number(min=1)}
outputs = {"vector": vector(length=512)}


@runway.command("random_sample", inputs=inputs, outputs=outputs)
def random_sample(result_of_setup, args):
    # TODO: Come back, I think there is a bug here...
    # we should be returning a serialized version of the data, not a deserialized version...
    vec = vector(length=args["length"])
    rand = np.random.random_sample(args["length"])
    return {"vector": vec.deserialize(rand)}


runway.run()

# curl -H "content-type: application/json" -d '{"length": 128}' http://localhost:8000/random_sample
Example #24

def get_model_kwargs():  # header reconstructed; the snippet begins mid-function
    if USE_CUDA:
        # the CNN detector is the usual reason to check for CUDA with
        # face_recognition; the dict contents are an assumption
        return {
            'model': 'cnn'
        }
    else:
        return {}

@runway.setup
def setup():
    global USE_CUDA
    if dlib.cuda.get_num_devices() > 0 and dlib.DLIB_USE_CUDA:
        USE_CUDA = True
        print('CUDA detected, using CNN model...')

# https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/facerec_from_webcam_faster.py#L60
identify_face_inputs = {
    'input_image': image,
    'label_image': image,
    'match_tolerance': number(min=0.1, max=1.0, step=0.1, default=0.6)
}
identify_face_outputs = {
    'results': array(image_bounding_box),
}
@runway.command('identify_face', inputs=identify_face_inputs, outputs=identify_face_outputs)
def identify_face(model, args):
    input_arr = np.array(args['input_image'])
    label_arr = np.array(args['label_image'])
    input_locations = face_recognition.face_locations(input_arr, **get_model_kwargs())
    input_encodings = face_recognition.face_encodings(input_arr, known_face_locations=input_locations)

    global LAST_LABEL_IMAGE_ARR
    global LAST_LABEL_ENCODINGS
    label_encodings = None
    # if the label image has changed, update the label encodings, otherwise use
Example #25

# =========================================================================

# Import the Runway SDK. Please install it first with
# `pip install runway-python`.
import runway
from runway.data_types import number, text, image, category
from model import YOLACT_MODEL
from PIL import Image

# Setup the model, initialize weights, set the configs of the model, etc.
# Every model will have a different set of configurations and requirements.
# Check https://docs.runwayapp.ai/#/python-sdk to see a complete list of
# supported configs. The setup function should return the model ready to be
# used.
setup_options = {
    'threshold': number(min=0, max=1, step=0.1, default=0.3),
    'checkpoint': runway.file(extension='.pth'),
    'mode': category(choices=["mask_only", "box_only", "both"], default="both"),
}
@runway.setup(options=setup_options)
def setup(opts):
    model = YOLACT_MODEL(opts)
    return model

# Every model needs to have at least one command. Every command allows to send
# inputs and process outputs. To see a complete list of supported inputs and
# outputs data types: https://sdk.runwayml.com/en/latest/data_types.html
@runway.command(name='detect',
                inputs={ 'input_image': image(width=550, height=550) },
                outputs={ 'output_image': image(width=550, height=550) })
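# The command function itself is truncated from the snippet; a minimal
# sketch (YOLACT_MODEL.detect is an assumed method name):
def detect(model, args):
    output_image = model.detect(args['input_image'])
    return {'output_image': output_image}

if __name__ == '__main__':
    runway.run()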
Example #26

#
@runway.setup(options=setup_options)
def setup(opts):
    model = TransformersModel(opts)
    return model


# Query a text with a question
@runway.command(name='query',
                inputs={
                    'document': text(),
                    'question': text()
                },
                outputs={
                    'answer': text(),
                    'score': number(),
                    'start': number(),
                    'end': number()
                },
                description='Ask a question about a text.')
#
def query(model, args):
    answer = model.query(args['document'], args['question'])
    return answer


# Sentiment analysis
@runway.command(name='sentiment',
                inputs={'document': text()},
                outputs={
                    'label': text(),
Example #27

def setup(opts):  # header reconstructed; the snippet begins mid-function
    trainer = MUNIT_Trainer(config)

    state_dict = torch.load(generator_checkpoint_path)
    trainer.gen_a.load_state_dict(state_dict['a'])
    trainer.gen_b.load_state_dict(state_dict['b'])

    return {'model': trainer, 'config': config}


@runway.command(name='generate',
                inputs={
                    'image':
                    image(description='Input image'),
                    'style':
                    number(default=1,
                           min=0,
                           max=1000,
                           description='Style Seed')
                },
                outputs={'image': image(description='Output image')},
                description='Image translation with style seeding')
def generate(model, args):
    #start command here?
    trainer = model['model']
    config = model['config']
    style_dim = config['gen']['style_dim']

    image_in = args['image'].convert('RGB')

    # replace this
    num_style_start = args['style']
    torch.manual_seed(num_style_start)